merge user-abstraction into webui work

commit 389b1a10ce
Author: Kevin Froman
Date: 2018-09-03 14:10:59 -05:00
GPG Key ID: 0D414D0FE405B63B (no known key found for this signature in database)
29 changed files with 1202 additions and 301 deletions

.dockerignore (new file, 4 lines)

@ -0,0 +1,4 @@
onionr/data/**/*
onionr/data
RUN-WINDOWS.bat
MY-RUN.sh

.gitignore (1 line added)

@@ -13,3 +13,4 @@ onionr/data-encrypted.dat
 onionr/.onionr-lock
 core
 .vscode/*
+venv/*

.gitmodules (3 lines removed)

@ -1,3 +0,0 @@
[submodule "onionr/bitpeer"]
path = onionr/bitpeer
url = https://github.com/beardog108/bitpeer.py

Dockerfile (new file, 28 lines)

@ -0,0 +1,28 @@
FROM ubuntu:bionic
#Base settings
ENV HOME /root
#Install needed packages
RUN apt update && apt install -y python3 python3-dev python3-pip tor locales nano sqlite3
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
locale-gen
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
WORKDIR /srv/
ADD ./requirements.txt /srv/requirements.txt
RUN pip3 install -r requirements.txt
WORKDIR /root/
#Add Onionr source
COPY . /root/
VOLUME /root/data/
#Set upstart command
CMD bash
#Expose ports
EXPOSE 8080

Makefile

@@ -1,3 +1,5 @@
+PREFIX = /usr/local
 .DEFAULT_GOAL := setup
 setup:
@@ -5,16 +7,15 @@ setup:
 -@cd onionr/static-data/ui/; ./compile.py
 install:
-sudo rm -rf /usr/share/onionr/
-sudo rm -f /usr/bin/onionr
-sudo cp -rp ./onionr /usr/share/onionr
-sudo sh -c "echo \"#!/bin/sh\ncd /usr/share/onionr/\n./onionr.py \\\"\\\$$@\\\"\" > /usr/bin/onionr"
-sudo chmod +x /usr/bin/onionr
-sudo chown -R `whoami` /usr/share/onionr/
+cp -rfp ./onionr $(DESTDIR)$(PREFIX)/share/onionr
+echo '#!/bin/sh' > $(DESTDIR)$(PREFIX)/bin/onionr
+echo 'cd $(DESTDIR)$(PREFIX)/share/onionr' > $(DESTDIR)$(PREFIX)/bin/onionr
+echo './onionr "$$@"' > $(DESTDIR)$(PREFIX)/bin/onionr
+chmod +x $(DESTDIR)$(PREFIX)/bin/onionr
 uninstall:
-sudo rm -rf /usr/share/onionr
-sudo rm -f /usr/bin/onionr
+rm -rf $(DESTDIR)$(PREFIX)/share/onionr
+rm -f $(DESTDIR)$(PREFIX)/bin/onionr
 test:
 @./RUN-LINUX.sh stop
@@ -27,7 +28,7 @@ test:
 soft-reset:
 @echo "Soft-resetting Onionr..."
-rm -f onionr/data/blocks/*.dat onionr/data/*.db | true > /dev/null 2>&1
+rm -f onionr/data/blocks/*.dat onionr/data/*.db onionr/data/block-nonces.dat | true > /dev/null 2>&1
 @./RUN-LINUX.sh version | grep -v "Failed" --color=always
 reset:


@@ -86,6 +86,12 @@ Blocks are stored indefinitely until the allocated space is filled, at which poi
 ## Block Timestamping
-Onionr can provide evidence when a block was inserted by requesting other users to sign a hash of the current time with the block data hash: sha3_256(time + sha3_256(block data)).
+Onionr can provide evidence of when a block was inserted by requesting other users to sign a hash of the current time with the block data hash: sha3_256(time + sha3_256(block data)).
 This can be done either by the creator of the block prior to generation, or by any node after insertion.
+In addition, randomness beacons such as the one operated by [NIST](https://beacon.nist.gov/home) or the hash of the latest blocks in a cryptocurrency network could be used to affirm that a block was at least not *created* before a given time.
+# Direct Connections
+We propose a system to
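As a purely illustrative sketch of the timestamp digest described in the hunk above, sha3_256(time + sha3_256(block data)), and assuming the time is concatenated as an epoch string (the exact encoding is not specified there), the value could be computed like this:

    import hashlib
    import time

    def timestamp_digest(block_data: bytes) -> str:
        # inner hash: sha3_256(block data)
        inner = hashlib.sha3_256(block_data).hexdigest()
        # outer hash: sha3_256(time + inner), using the current epoch time as a string
        stamp = str(int(time.time()))
        return hashlib.sha3_256((stamp + inner).encode()).hexdigest()

This is not Onionr's implementation; it only shows the shape of the construction that other nodes would be asked to sign.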

onionr/api.py

@@ -20,11 +20,11 @@
 import flask
 from flask import request, Response, abort, send_from_directory
 from multiprocessing import Process
-from gevent.wsgi import WSGIServer
+from gevent.pywsgi import WSGIServer
 import sys, random, threading, hmac, hashlib, base64, time, math, os, json
 from core import Core
 from onionrblockapi import Block
-import onionrutils, onionrcrypto, blockimporter, onionrevents as events, logger, config
+import onionrutils, onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config
 class API:
 '''
@@ -119,9 +119,7 @@ class API:
 '''
 Simply define the request as not having yet failed, before every request.
 '''
 self.requestFailed = False
 return
 @app.after_request
@@ -241,16 +239,6 @@ class API:
 resp = Response('Goodbye')
 elif action == 'ping':
 resp = Response('pong')
-elif action == 'site':
-block = data
-siteData = self._core.getData(data)
-response = 'not found'
-if siteData != '' and siteData != False:
-self.mimeType = 'text/html'
-response = siteData.split(b'-', 2)[-1]
-resp = Response(response)
-elif action == 'info':
-resp = Response(json.dumps({'pubkey' : self._core._crypto.pubKey, 'host' : self._core.hsAdder}))
 elif action == "insertBlock":
 response = {'success' : False, 'reason' : 'An unknown error occurred'}
@@ -399,13 +387,57 @@ class API:
 pass
 else:
 if sys.getsizeof(data) < 100000000:
-if blockimporter.importBlockFromData(data, self._core):
-resp = 'success'
-else:
-logger.warn('Error encountered importing uploaded block')
+try:
+if blockimporter.importBlockFromData(data, self._core):
+resp = 'success'
+else:
+logger.warn('Error encountered importing uploaded block')
+except onionrexceptions.BlacklistedBlock:
+logger.debug('uploaded block is blacklisted')
+pass
 resp = Response(resp)
 return resp
@app.route('/public/announce/', methods=['POST'])
def acceptAnnounce():
self.validateHost('public')
resp = 'failure'
powHash = ''
randomData = ''
newNode = ''
ourAdder = self._core.hsAddress.encode()
try:
newNode = request.form['node'].encode()
except KeyError:
logger.warn('No block specified for upload')
pass
else:
try:
randomData = request.form['random']
randomData = base64.b64decode(randomData)
except KeyError:
logger.warn('No random data specified for upload')
else:
nodes = newNode + self._core.hsAddress.encode()
nodes = self._core._crypto.blake2bHash(nodes)
powHash = self._core._crypto.blake2bHash(randomData + nodes)
try:
powHash = powHash.decode()
except AttributeError:
pass
if powHash.startswith('0000'):
try:
newNode = newNode.decode()
except AttributeError:
pass
if self._core.addAddress(newNode):
resp = 'Success'
else:
logger.warn(newNode.decode() + ' failed to meet POW: ' + powHash)
resp = Response(resp)
return resp
 @app.route('/public/')
 def public_handler():
 # Public means it is publicly network accessible
@@ -430,20 +462,11 @@ class API:
 resp = Response(self._utils.getBlockDBHash())
 elif action == 'getBlockHashes':
 resp = Response('\n'.join(self._core.getBlockList()))
-elif action == 'announce':
-if data != '':
-# TODO: require POW for this
-if self._core.addAddress(data):
-resp = Response('Success')
-else:
-resp = Response('')
-else:
-resp = Response('')
 # setData should be something the communicator initiates, not this api
 elif action == 'getData':
 resp = ''
 if self._utils.validateHash(data):
-if not os.path.exists('data/blocks/' + data + '.db'):
+if os.path.exists('data/blocks/' + data + '.dat'):
 block = Block(hash=data.encode(), core=self._core)
 resp = base64.b64encode(block.getRaw().encode()).decode()
 if len(resp) == 0:
@@ -477,7 +500,6 @@ class API:
 def authFail(err):
 self.requestFailed = True
 resp = Response("403")
 return resp
 @app.errorhandler(401)
@@ -490,11 +512,13 @@ class API:
 logger.info('Starting client on ' + self.host + ':' + str(bindPort) + '...', timestamp=False)
 try:
+while len(self._core.hsAddress) == 0:
+self._core.refreshFirstStartVars()
+time.sleep(0.5)
 self.http_server = WSGIServer((self.host, bindPort), app)
 self.http_server.serve_forever()
 except KeyboardInterrupt:
 pass
-#app.run(host=self.host, port=bindPort, debug=False, threaded=True)
 except Exception as e:
 logger.error(str(e))
 logger.fatal('Failed to start client on ' + self.host + ':' + str(bindPort) + ', exiting...')
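For reference, a hedged sketch of the client side of the /public/announce/ proof of work added earlier in this file: the route reads form fields 'node' and 'random' (base64) and checks that blake2b(random + blake2b(announcer onion + receiver onion)) begins with '0000'. hashlib.blake2b is used below only as a stand-in for Onionr's blake2bHash, whose digest parameters and encoding may differ; the function and parameter names are illustrative, not part of Onionr.

    import base64, hashlib, os

    def make_announce_pow(our_onion: bytes, their_onion: bytes) -> str:
        # inner hash over announcer address + receiver address (hex digest as bytes)
        nodes = hashlib.blake2b(our_onion + their_onion).hexdigest().encode()
        while True:
            random_data = os.urandom(32)
            # brute force random bytes until the outer hash starts with '0000'
            if hashlib.blake2b(random_data + nodes).hexdigest().startswith('0000'):
                # returned value would be sent as the base64 'random' form field
                return base64.b64encode(random_data).decode()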

onionr/blockimporter.py

@@ -20,6 +20,12 @@
 import core, onionrexceptions, logger
 def importBlockFromData(content, coreInst):
 retData = False
+dataHash = coreInst._crypto.sha3Hash(content)
+if coreInst._blacklist.inBlacklist(dataHash):
+raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,))
 if not isinstance(coreInst, core.Core):
 raise Exception("coreInst must be an Onionr core instance")
@@ -30,11 +36,15 @@ def importBlockFromData(content, coreInst):
 metas = coreInst._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
 metadata = metas[0]
-if coreInst._utils.validateMetadata(metadata): # check if metadata is valid
+if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid
 if coreInst._crypto.verifyPow(content): # check if POW is enough/correct
 logger.info('Block passed proof, saving.')
-blockHash = coreInst.setData(content)
-blockHash = coreInst.addToBlockDB(blockHash, dataSaved=True)
-coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
-retData = True
+try:
+blockHash = coreInst.setData(content)
+except onionrexceptions.DiskAllocationReached:
+pass
+else:
+coreInst.addToBlockDB(blockHash, dataSaved=True)
+coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+retData = True
 return retData

onionr/communicator2.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 '''
-Onionr - P2P Microblogging Platform & Social network.
+Onionr - P2P Anonymous Storage Network
 This file contains both the OnionrCommunicate class for communcating with peers
 and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
@@ -21,11 +21,14 @@
 '''
 import sys, os, core, config, json, requests, time, logger, threading, base64, onionr
 import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
+import onionrdaemontools
 from defusedxml import minidom
 class OnionrCommunicatorDaemon:
 def __init__(self, debug, developmentMode):
+self.isOnline = True # Assume we're connected to the internet
 # list of timer instances
 self.timers = []
@ -48,6 +51,8 @@ class OnionrCommunicatorDaemon:
# lists of connected peers and peers we know we can't reach currently # lists of connected peers and peers we know we can't reach currently
self.onlinePeers = [] self.onlinePeers = []
self.offlinePeers = [] self.offlinePeers = []
self.cooldownPeer = {}
self.connectTimes = {}
self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances) self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)
# amount of threads running by name, used to prevent too many # amount of threads running by name, used to prevent too many
@ -69,28 +74,34 @@ class OnionrCommunicatorDaemon:
# Loads in and starts the enabled plugins # Loads in and starts the enabled plugins
plugins.reload() plugins.reload()
# daemon tools are misc daemon functions, e.g. announce to online peers
# intended only for use by OnionrCommunicatorDaemon
#self.daemonTools = onionrdaemontools.DaemonTools(self)
self.daemonTools = onionrdaemontools.DaemonTools(self)
if debug or developmentMode: if debug or developmentMode:
OnionrCommunicatorTimers(self, self.heartbeat, 10) OnionrCommunicatorTimers(self, self.heartbeat, 10)
# Print nice header thing :)
if config.get('general.display_header', True) and not self.shutdown:
self.header()
 # Set timers, function reference, seconds
 # requiresPeer True means the timer function won't fire if we have no connected peers
 OnionrCommunicatorTimers(self, self.daemonCommands, 5)
 OnionrCommunicatorTimers(self, self.detectAPICrash, 5)
-peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60)
+peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
-OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1)
+OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
-OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True)
+OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True)
 OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
+OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
 OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
 OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
+OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
+netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
+announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1)
 cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
 # set loop to execute instantly to load up peer pool (replaced old pool init wait)
 peerPoolTimer.count = (peerPoolTimer.frequency - 1)
 cleanupTimer.count = (cleanupTimer.frequency - 60)
+announceTimer.count = (cleanupTimer.frequency - 60)
# Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
try: try:
@ -105,14 +116,14 @@ class OnionrCommunicatorDaemon:
pass pass
logger.info('Goodbye.') logger.info('Goodbye.')
self._core._utils.localCommand('shutdown') self._core._utils.localCommand('shutdown') # shutdown the api
time.sleep(0.5) time.sleep(0.5)
def lookupKeys(self): def lookupKeys(self):
'''Lookup new keys''' '''Lookup new keys'''
logger.debug('Looking up new keys...') logger.debug('Looking up new keys...')
tryAmount = 1 tryAmount = 1
for i in range(tryAmount): for i in range(tryAmount): # amount of times to ask peers for new keys
# Download new key list from random online peers # Download new key list from random online peers
peer = self.pickOnlinePeer() peer = self.pickOnlinePeer()
newKeys = self.peerAction(peer, action='kex') newKeys = self.peerAction(peer, action='kex')
@ -139,6 +150,12 @@ class OnionrCommunicatorDaemon:
existingBlocks = self._core.getBlockList() existingBlocks = self._core.getBlockList()
triedPeers = [] # list of peers we've tried this time around triedPeers = [] # list of peers we've tried this time around
for i in range(tryAmount): for i in range(tryAmount):
# check if disk allocation is used
if not self.isOnline:
break
if self._core._utils.storageCounter.isFull():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
peer = self.pickOnlinePeer() # select random online peer peer = self.pickOnlinePeer() # select random online peer
# if we've already tried all the online peers this time around, stop # if we've already tried all the online peers this time around, stop
if peer in triedPeers: if peer in triedPeers:
@ -153,7 +170,7 @@ class OnionrCommunicatorDaemon:
if newDBHash != self._core.getAddressInfo(peer, 'DBHash'): if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
self._core.setAddressInfo(peer, 'DBHash', newDBHash) self._core.setAddressInfo(peer, 'DBHash', newDBHash)
try: try:
newBlocks = self.peerAction(peer, 'getBlockHashes') newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes
except Exception as error: except Exception as error:
logger.warn("could not get new blocks with " + peer, error=error) logger.warn("could not get new blocks with " + peer, error=error)
newBlocks = False newBlocks = False
@@ -164,20 +181,31 @@
 # if newline seperated string is valid hash
 if not i in existingBlocks:
 # if block does not exist on disk and is not already in block queue
-if i not in self.blockQueue:
-self.blockQueue.append(i)
+if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i):
+self.blockQueue.append(i) # add blocks to download queue
 self.decrementThreadCount('lookupBlocks')
 return
 def getBlocks(self):
 '''download new blocks in queue'''
 for blockHash in self.blockQueue:
-if self.shutdown:
+removeFromQueue = True
+if self.shutdown or not self.isOnline:
+# Exit loop if shutting down or offline
 break
+# Do not download blocks being downloaded or that are already saved (edge cases)
 if blockHash in self.currentDownloading:
 logger.debug('ALREADY DOWNLOADING ' + blockHash)
 continue
+if blockHash in self._core.getBlockList():
+logger.debug('%s is already saved' % (blockHash,))
+self.blockQueue.remove(blockHash)
+continue
+if self._core._blacklist.inBlacklist(blockHash):
+continue
+if self._core._utils.storageCounter.isFull():
+break
-self.currentDownloading.append(blockHash)
+self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
 logger.info("Attempting to download %s..." % blockHash)
 peerUsed = self.pickOnlinePeer()
 content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata)
@@ -197,16 +225,25 @@ class OnionrCommunicatorDaemon:
 metas = self._core._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
 metadata = metas[0]
 #meta = metas[1]
-if self._core._utils.validateMetadata(metadata): # check if metadata is valid
+if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
 if self._core._crypto.verifyPow(content): # check if POW is enough/correct
-logger.info('Block passed proof, saving.')
-self._core.setData(content)
-self._core.addToBlockDB(blockHash, dataSaved=True)
-self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+logger.info('Block passed proof, attempting save.')
+try:
+self._core.setData(content)
+except onionrexceptions.DiskAllocationReached:
+logger.error("Reached disk allocation allowance, cannot save this block.")
+removeFromQueue = False
+else:
+self._core.addToBlockDB(blockHash, dataSaved=True)
+self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
 else:
 logger.warn('POW failed for block ' + blockHash)
 else:
-logger.warn('Metadata for ' + blockHash + ' is invalid.')
+if self._core._blacklist.inBlacklist(realHash):
+logger.warn('%s is blacklisted' % (realHash,))
+else:
+logger.warn('Metadata for ' + blockHash + ' is invalid.')
+self._core._blacklist.addToDB(blockHash)
 else:
 # if block didn't meet expected hash
 tempHash = self._core._crypto.sha3Hash(content) # lazy hack, TODO use var
@@ -217,7 +254,8 @@ class OnionrCommunicatorDaemon:
 # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
 onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
 logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
-self.blockQueue.remove(blockHash) # remove from block queue both if success or false
+if removeFromQueue:
+self.blockQueue.remove(blockHash) # remove from block queue both if success or false
 self.currentDownloading.remove(blockHash)
 self.decrementThreadCount('getBlocks')
 return
@@ -260,7 +298,7 @@ class OnionrCommunicatorDaemon:
 '''Manages the self.onlinePeers attribute list, connects to more peers if we have none connected'''
 logger.info('Refreshing peer pool.')
-maxPeers = 6
+maxPeers = int(config.get('peers.maxConnect'))
 needed = maxPeers - len(self.onlinePeers)
 for i in range(needed):
@@ -278,8 +316,9 @@ class OnionrCommunicatorDaemon:
 def addBootstrapListToPeerList(self, peerList):
 '''Add the bootstrap list to the peer list (no duplicates)'''
 for i in self._core.bootstrapList:
-if i not in peerList and i not in self.offlinePeers and i != self._core.hsAdder:
+if i not in peerList and i not in self.offlinePeers and i != self._core.hsAddress:
 peerList.append(i)
+self._core.addAddress(i)
 def connectNewPeer(self, peer='', useBootstrap=False):
 '''Adds a new random online peer to self.onlinePeers'''
@@ -300,7 +339,9 @@ class OnionrCommunicatorDaemon:
 self.addBootstrapListToPeerList(peerList)
 for address in peerList:
-if len(address) == 0 or address in tried or address in self.onlinePeers:
+if not config.get('tor.v3onions') and len(address) == 62:
+continue
+if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
 continue
 if self.shutdown:
 return
@@ -309,6 +350,7 @@ class OnionrCommunicatorDaemon:
 time.sleep(0.1)
 if address not in self.onlinePeers:
 self.onlinePeers.append(address)
+self.connectTimes[address] = self._core._utils.getEpoch()
 retData = address
 # add peer to profile list if they're not in it
@@ -323,6 +365,17 @@ class OnionrCommunicatorDaemon:
 logger.debug('Failed to connect to ' + address)
 return retData
def removeOnlinePeer(self, peer):
'''Remove an online peer'''
try:
del self.connectTimes[peer]
except KeyError:
pass
try:
self.onlinePeers.remove(peer)
except ValueError:
pass
def peerCleanup(self): def peerCleanup(self):
'''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)''' '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
onionrpeers.peerCleanup(self._core) onionrpeers.peerCleanup(self._core)
@@ -354,8 +407,9 @@ class OnionrCommunicatorDaemon:
 if retData == False:
 try:
 self.getPeerProfileInstance(peer).addScore(-10)
-self.onlinePeers.remove(peer)
-self.getOnlinePeers() # Will only add a new peer to pool if needed
+self.removeOnlinePeer(peer)
+if action != 'ping':
+self.getOnlinePeers() # Will only add a new peer to pool if needed
 except ValueError:
 pass
 else:
@@ -437,17 +491,10 @@ class OnionrCommunicatorDaemon:
 def announce(self, peer):
 '''Announce to peers our address'''
-announceCount = 0
-announceAmount = 2
-for peer in self.onlinePeers:
-announceCount += 1
-if self.peerAction(peer, 'announce', self._core.hsAdder) == 'Success':
-logger.info('Successfully introduced node to ' + peer)
-break
-else:
-if announceCount == announceAmount:
-logger.warn('Could not introduce node. Try again soon')
-break
+if self.daemonTools.announceNode():
+logger.info('Successfully introduced node to ' + peer)
+else:
+logger.warn('Could not introduce node.')
def detectAPICrash(self): def detectAPICrash(self):
'''exit if the api server crashes/stops''' '''exit if the api server crashes/stops'''
@ -463,13 +510,6 @@ class OnionrCommunicatorDaemon:
self.shutdown = True self.shutdown = True
self.decrementThreadCount('detectAPICrash') self.decrementThreadCount('detectAPICrash')
def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
if os.path.exists('static-data/header.txt'):
with open('static-data/header.txt', 'rb') as file:
# only to stdout, not file or log or anything
sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION))
logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
class OnionrCommunicatorTimers: class OnionrCommunicatorTimers:
def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False): def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False):
self.timerFunction = timerFunction self.timerFunction = timerFunction

onionr/core.py

@@ -1,5 +1,5 @@
 '''
-Onionr - P2P Microblogging Platform & Social network
+Onionr - P2P Anonymous Storage Network
 Core Onionr library, useful for external programs. Handles peer & data processing
 '''
@@ -21,7 +21,8 @@ import sqlite3, os, sys, time, math, base64, tarfile, getpass, simplecrypt, hash
 from onionrblockapi import Block
 import onionrutils, onionrcrypto, onionrproofs, onionrevents as events, onionrexceptions, onionrvalues
+import onionrblacklist
+import dbcreator
 if sys.version_info < (3, 6):
 try:
 import sha3
@@ -40,13 +41,18 @@ class Core:
 self.blockDB = 'data/blocks.db'
 self.blockDataLocation = 'data/blocks/'
 self.addressDB = 'data/address.db'
-self.hsAdder = ''
+self.hsAddress = ''
 self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
 self.bootstrapList = []
 self.requirements = onionrvalues.OnionrValues()
 self.torPort = torPort
+self.dataNonceFile = 'data/block-nonces.dat'
+self.dbCreate = dbcreator.DBCreator(self)
 self.usageFile = 'data/disk-usage.txt'
+self.config = config
+self.maxBlockSize = 10000000 # max block size in bytes
 if not os.path.exists('data/'):
 os.mkdir('data/')
@@ -57,7 +63,7 @@ class Core:
 if os.path.exists('data/hs/hostname'):
 with open('data/hs/hostname', 'r') as hs:
-self.hsAdder = hs.read().strip()
+self.hsAddress = hs.read().strip()
# Load bootstrap address list # Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation): if os.path.exists(self.bootstrapFileLocation):
@ -71,6 +77,7 @@ class Core:
self._utils = onionrutils.OnionrUtils(self) self._utils = onionrutils.OnionrUtils(self)
# Initialize the crypto object # Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self) self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)
except Exception as error: except Exception as error:
logger.error('Failed to initialize core Onionr library.', error=error) logger.error('Failed to initialize core Onionr library.', error=error)
@ -78,6 +85,12 @@ class Core:
sys.exit(1) sys.exit(1)
return return
def refreshFirstStartVars(self):
'''Hack to refresh some vars which may not be set on first start'''
if os.path.exists('data/hs/hostname'):
with open('data/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
def addPeer(self, peerID, powID, name=''): def addPeer(self, peerID, powID, name=''):
''' '''
Adds a public key to the key database (misleading function name) Adds a public key to the key database (misleading function name)
@@ -92,7 +105,7 @@
 conn = sqlite3.connect(self.peerDB)
 hashID = self._crypto.pubKeyHashID(peerID)
 c = conn.cursor()
-t = (peerID, name, 'unknown', hashID, powID)
+t = (peerID, name, 'unknown', hashID, powID, 0)
 for i in c.execute("SELECT * FROM PEERS where id = '" + peerID + "';"):
 try:
@@ -103,7 +116,7 @@
 pass
 except IndexError:
 pass
-c.execute('INSERT INTO peers (id, name, dateSeen, pow, hashID) VALUES(?, ?, ?, ?, ?);', t)
+c.execute('INSERT INTO peers (id, name, dateSeen, pow, hashID, trust) VALUES(?, ?, ?, ?, ?, ?);', t)
 conn.commit()
 conn.close()
@ -125,7 +138,6 @@ class Core:
for i in c.execute("SELECT * FROM adders where address = '" + address + "';"): for i in c.execute("SELECT * FROM adders where address = '" + address + "';"):
try: try:
if i[0] == address: if i[0] == address:
logger.warn('Not adding existing address')
conn.close() conn.close()
return False return False
except ValueError: except ValueError:
@ -158,14 +170,15 @@ class Core:
conn.close() conn.close()
events.event('address_remove', data = {'address': address}, onionr = None) events.event('address_remove', data = {'address': address}, onionr = None)
return True return True
else: else:
return False return False
 def removeBlock(self, block):
 '''
-remove a block from this node
+remove a block from this node (does not automatically blacklist)
+**You may want blacklist.addToDB(blockHash)
 '''
 if self._utils.validateHash(block):
 conn = sqlite3.connect(self.blockDB)
@@ -174,97 +187,36 @@
 c.execute('Delete from hashes where hash=?;', t)
 conn.commit()
 conn.close()
+blockFile = 'data/blocks/' + block + '.dat'
+dataSize = 0
 try:
-os.remove('data/blocks/' + block + '.dat')
+''' Get size of data when loaded as an object/var, rather than on disk,
+to avoid conflict with getsizeof when saving blocks
+'''
+with open(blockFile, 'r') as data:
+dataSize = sys.getsizeof(data.read())
+self._utils.storageCounter.removeBytes(dataSize)
+os.remove(blockFile)
 except FileNotFoundError:
 pass
 def createAddressDB(self):
 '''
 Generate the address database
-types:
-1: I2P b32 address
-2: Tor v2 (like facebookcorewwwi.onion)
-3: Tor v3
 '''
-conn = sqlite3.connect(self.addressDB)
+self.dbCreate.createAddressDB()
c = conn.cursor()
c.execute('''CREATE TABLE adders(
address text,
type int,
knownPeer text,
speed int,
success int,
DBHash text,
powValue text,
failure int,
lastConnect int,
lastConnectAttempt int,
trust int
);
''')
conn.commit()
conn.close()
 def createPeerDB(self):
 '''
 Generate the peer sqlite3 database and populate it with the peers table.
 '''
-# generate the peer database
+self.dbCreate.createPeerDB()
conn = sqlite3.connect(self.peerDB)
c = conn.cursor()
c.execute('''CREATE TABLE peers(
ID text not null,
name text,
adders text,
blockDBHash text,
forwardKey text,
dateSeen not null,
bytesStored int,
trust int,
pubkeyExchanged int,
hashID text,
pow text not null);
''')
conn.commit()
conn.close()
return
 def createBlockDB(self):
 '''
 Create a database for blocks
-hash - the hash of a block
-dateReceived - the date the block was recieved, not necessarily when it was created
-decrypted - if we can successfully decrypt the block (does not describe its current state)
-dataType - data type of the block
-dataFound - if the data has been found for the block
-dataSaved - if the data has been saved for the block
-sig - optional signature by the author (not optional if author is specified)
-author - multi-round partial sha3-256 hash of authors public key
-dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
 '''
-if os.path.exists(self.blockDB):
+self.dbCreate.createBlockDB()
raise Exception("Block database already exists")
conn = sqlite3.connect(self.blockDB)
c = conn.cursor()
c.execute('''CREATE TABLE hashes(
hash text not null,
dateReceived int,
decrypted int,
dataType text,
dataFound int,
dataSaved int,
sig text,
author text,
dateClaimed int
);
''')
conn.commit()
conn.close()
return
 def addToBlockDB(self, newHash, selfInsert=False, dataSaved=False):
 '''
@@ -304,16 +256,26 @@ class Core:
 return data
-def setData(self, data):
-'''
-Set the data assciated with a hash
-'''
-data = data
+def _getSha3Hash(self, data):
 hasher = hashlib.sha3_256()
 if not type(data) is bytes:
 data = data.encode()
 hasher.update(data)
 dataHash = hasher.hexdigest()
+return dataHash
+def setData(self, data):
+'''
+Set the data assciated with a hash
+'''
+data = data
+dataSize = sys.getsizeof(data)
+if not type(data) is bytes:
+data = data.encode()
+dataHash = self._getSha3Hash(data)
 if type(dataHash) is bytes:
 dataHash = dataHash.decode()
 blockFileName = self.blockDataLocation + dataHash + '.dat'
@@ -321,15 +283,19 @@
 pass # TODO: properly check if block is already saved elsewhere
 #raise Exception("Data is already set for " + dataHash)
 else:
-blockFile = open(blockFileName, 'wb')
-blockFile.write(data)
-blockFile.close()
+if self._utils.storageCounter.addBytes(dataSize) != False:
+blockFile = open(blockFileName, 'wb')
+blockFile.write(data)
+blockFile.close()
 conn = sqlite3.connect(self.blockDB)
 c = conn.cursor()
 c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
 conn.commit()
 conn.close()
+with open(self.dataNonceFile, 'a') as nonceFile:
+nonceFile.write(dataHash + '\n')
+else:
+raise onionrexceptions.DiskAllocationReached
 return dataHash
@@ -411,18 +377,22 @@
 '''
 Add a command to the daemon queue, used by the communication daemon (communicator.py)
 '''
+retData = True
 # Intended to be used by the web server
 date = self._utils.getEpoch()
 conn = sqlite3.connect(self.queueDB)
 c = conn.cursor()
 t = (command, data, date)
-c.execute('INSERT INTO commands (command, data, date) VALUES(?, ?, ?)', t)
-conn.commit()
-conn.close()
+try:
+c.execute('INSERT INTO commands (command, data, date) VALUES(?, ?, ?)', t)
+conn.commit()
+conn.close()
+except sqlite3.OperationalError:
+retData = False
+self.daemonQueue()
 events.event('queue_push', data = {'command': command, 'data': data}, onionr = None)
-return
+return retData
def clearDaemonQueue(self): def clearDaemonQueue(self):
''' '''
@ -456,19 +426,23 @@ class Core:
conn.close() conn.close()
return addressList return addressList
-def listPeers(self, randomOrder=True, getPow=False):
+def listPeers(self, randomOrder=True, getPow=False, trust=0):
 '''
 Return a list of public keys (misleading function name)
 randomOrder determines if the list should be in a random order
+trust sets the minimum trust to list
 '''
 conn = sqlite3.connect(self.peerDB)
 c = conn.cursor()
 payload = ""
+if trust not in (0, 1, 2):
+logger.error('Tried to select invalid trust.')
+return
 if randomOrder:
-payload = 'SELECT * FROM peers ORDER BY RANDOM();'
+payload = 'SELECT * FROM peers where trust >= %s ORDER BY RANDOM();' % (trust,)
 else:
-payload = 'SELECT * FROM peers;'
+payload = 'SELECT * FROM peers where trust >= %s;' % (trust,)
 peerList = []
 for i in c.execute(payload):
 try:
@@ -592,7 +566,7 @@ class Core:
 if unsaved:
 execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();'
 else:
-execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;'
+execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;'
 rows = list()
 for row in c.execute(execute):
 for i in row:
@ -677,6 +651,18 @@ class Core:
''' '''
retData = False retData = False
# check nonce
dataNonce = self._utils.bytesToStr(self._crypto.sha3Hash(data))
try:
with open(self.dataNonceFile, 'r') as nonces:
if dataNonce in nonces:
return retData
except FileNotFoundError:
pass
# record nonce
with open(self.dataNonceFile, 'a') as nonceFile:
nonceFile.write(dataNonce + '\n')
if meta is None: if meta is None:
meta = dict() meta = dict()
@ -688,6 +674,7 @@ class Core:
signature = '' signature = ''
signer = '' signer = ''
metadata = {} metadata = {}
# metadata is full block metadata, meta is internal, user specified metadata
# only use header if not set in provided meta # only use header if not set in provided meta
if not header is None: if not header is None:

onionr/dbcreator.py (new file, 108 lines)

@ -0,0 +1,108 @@
'''
Onionr - P2P Anonymous Data Storage & Sharing
DBCreator, creates sqlite3 databases used by Onionr
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3, os
class DBCreator:
def __init__(self, coreInst):
self.core = coreInst
def createAddressDB(self):
'''
Generate the address database
types:
1: I2P b32 address
2: Tor v2 (like facebookcorewwwi.onion)
3: Tor v3
'''
conn = sqlite3.connect(self.core.addressDB)
c = conn.cursor()
c.execute('''CREATE TABLE adders(
address text,
type int,
knownPeer text,
speed int,
success int,
DBHash text,
powValue text,
failure int,
lastConnect int,
lastConnectAttempt int,
trust int
);
''')
conn.commit()
conn.close()
def createPeerDB(self):
'''
Generate the peer sqlite3 database and populate it with the peers table.
'''
# generate the peer database
conn = sqlite3.connect(self.core.peerDB)
c = conn.cursor()
c.execute('''CREATE TABLE peers(
ID text not null,
name text,
adders text,
forwardKey text,
dateSeen not null,
bytesStored int,
trust int,
pubkeyExchanged int,
hashID text,
pow text not null);
''')
conn.commit()
conn.close()
return
def createBlockDB(self):
'''
Create a database for blocks
hash - the hash of a block
dateReceived - the date the block was recieved, not necessarily when it was created
decrypted - if we can successfully decrypt the block (does not describe its current state)
dataType - data type of the block
dataFound - if the data has been found for the block
dataSaved - if the data has been saved for the block
sig - optional signature by the author (not optional if author is specified)
author - multi-round partial sha3-256 hash of authors public key
dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
'''
if os.path.exists(self.core.blockDB):
raise Exception("Block database already exists")
conn = sqlite3.connect(self.core.blockDB)
c = conn.cursor()
c.execute('''CREATE TABLE hashes(
hash text not null,
dateReceived int,
decrypted int,
dataType text,
dataFound int,
dataSaved int,
sig text,
author text,
dateClaimed int
);
''')
conn.commit()
conn.close()
return

onionr/netcontroller.py

@@ -18,7 +18,7 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
-import subprocess, os, random, sys, logger, time, signal
+import subprocess, os, random, sys, logger, time, signal, config
 from onionrblockapi import Block
 class NetController:
@@ -33,6 +33,7 @@ class NetController:
 self.hsPort = hsPort
 self._torInstnace = ''
 self.myID = ''
+config.reload()
 '''
 if os.path.exists(self.torConfigLocation):
 torrc = open(self.torConfigLocation, 'r')
@@ -47,11 +48,15 @@
 '''
 Generate a torrc file for our tor instance
 '''
+hsVer = '# v2 onions'
+if config.get('tor.v3onions'):
+hsVer = 'HiddenServiceVersion 3'
+logger.info('Using v3 onions :)')
 if os.path.exists(self.torConfigLocation):
 os.remove(self.torConfigLocation)
 torrcData = '''SocksPort ''' + str(self.socksPort) + '''
 HiddenServiceDir data/hs/
+\n''' + hsVer + '''\n
 HiddenServicePort 80 127.0.0.1:''' + str(self.hsPort) + '''
 DataDirectory data/tordata/
 '''
@@ -97,7 +102,7 @@ DataDirectory data/tordata/
 elif 'Opening Socks listener' in line.decode():
 logger.debug(line.decode().replace('\n', ''))
 else:
-logger.fatal('Failed to start Tor. Try killing any other Tor processes owned by this user.')
+logger.fatal('Failed to start Tor. Maybe a stray instance of Tor used by Onionr is still running?')
 return False
 except KeyboardInterrupt:
 logger.fatal("Got keyboard interrupt.")

onionr/onionr.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 '''
-Onionr - P2P Microblogging Platform & Social network.
+Onionr - P2P Anonymous Storage Network
 Onionr is the name for both the protocol and the original/reference software.
@@ -32,7 +32,7 @@ import onionrutils
 from onionrutils import OnionrUtils
 from netcontroller import NetController
 from onionrblockapi import Block
-import onionrproofs
+import onionrproofs, onionrexceptions, onionrusers
 try:
 from urllib3.contrib.socks import SOCKSProxyManager
@@ -40,7 +40,7 @@ except ImportError:
 raise Exception("You need the PySocks module (for use with socks5 proxy to use Tor)")
 ONIONR_TAGLINE = 'Anonymous P2P Platform - GPLv3 - https://Onionr.VoidNet.Tech'
-ONIONR_VERSION = '0.1.1' # for debugging and stuff
+ONIONR_VERSION = '0.2.0' # for debugging and stuff
 ONIONR_VERSION_TUPLE = tuple(ONIONR_VERSION.split('.')) # (MAJOR, MINOR, VERSION)
 API_VERSION = '4' # increments of 1; only change when something fundemental about how the API works changes. This way other nodes know how to communicate without learning too much information about you.
@ -135,18 +135,18 @@ class Onionr:
self.onionrCore.createAddressDB() self.onionrCore.createAddressDB()
# Get configuration # Get configuration
if type(config.get('client.hmac')) is type(None):
config.set('client.hmac', base64.b16encode(os.urandom(32)).decode('utf-8'), savefile=True)
if type(config.get('client.port')) is type(None):
randomPort = 0
while randomPort < 1024:
randomPort = self.onionrCore._crypto.secrets.randbelow(65535)
config.set('client.port', randomPort, savefile=True)
if type(config.get('client.participate')) is type(None):
config.set('client.participate', True, savefile=True)
if type(config.get('client.api_version')) is type(None):
config.set('client.api_version', API_VERSION, savefile=True)
if not data_exists:
# Generate default config
# Hostname should only be set if different from 127.x.x.x. Important for DNS rebinding attack prevention.
if self.debug:
randomPort = 8080
else:
while True:
randomPort = random.randint(1024, 65535)
if self.onionrUtils.checkPort(randomPort):
break
config.set('client', {'participate': True, 'hmac': base64.b16encode(os.urandom(32)).decode('utf-8'), 'port': randomPort, 'api_version': API_VERSION}, True)
self.cmds = { self.cmds = {
'': self.showHelpSuggestion, '': self.showHelpSuggestion,
@ -186,6 +186,8 @@ class Onionr:
'addaddress': self.addAddress, 'addaddress': self.addAddress,
'list-peers': self.listPeers, 'list-peers': self.listPeers,
'blacklist-block': self.banBlock,
'add-file': self.addFile, 'add-file': self.addFile,
'addfile': self.addFile, 'addfile': self.addFile,
'listconn': self.listConn, 'listconn': self.listConn,
@@ -208,7 +210,9 @@
 'getpass': self.printWebPassword,
 'get-pass': self.printWebPassword,
 'getpasswd': self.printWebPassword,
-'get-passwd': self.printWebPassword
+'get-passwd': self.printWebPassword,
+'friend': self.friendCmd
 }
 self.cmdhelp = {
@@ -230,7 +234,9 @@
 'listconn': 'list connected peers',
 'kex': 'exchange keys with peers (done automatically)',
 'pex': 'exchange addresses with peers (done automatically)',
+'blacklist-block': 'deletes a block by hash and permanently removes it from your node',
 'introduce': 'Introduce your node to the public Onionr network',
+'friend': '[add|remove] [public key/id]'
 }
 # initialize plugins
@ -258,6 +264,68 @@ class Onionr:
def getCommands(self): def getCommands(self):
return self.cmds return self.cmds
def friendCmd(self):
'''List, add, or remove friend(s)
Changes their peer DB entry.
'''
friend = ''
try:
# Get the friend command
action = sys.argv[2]
except IndexError:
logger.info('Syntax: friend add/remove/list [address]')
else:
action = action.lower()
if action == 'list':
# List out peers marked as our friend
for friend in self.onionrCore.listPeers(randomOrder=False, trust=1):
if friend == self.onionrCore._crypto.pubKey: # do not list our key
continue
friendProfile = onionrusers.OnionrUser(self.onionrCore, friend)
logger.info(friend + ' - ' + friendProfile.getName())
elif action in ('add', 'remove'):
try:
friend = sys.argv[3]
if not self.onionrUtils.validatePubKey(friend):
raise onionrexceptions.InvalidPubkey('Public key is invalid')
if friend not in self.onionrCore.listPeers():
raise onionrexceptions.KeyNotKnown
friend = onionrusers.OnionrUser(self.onionrCore, friend)
except IndexError:
logger.error('Friend ID is required.')
except onionrexceptions.KeyNotKnown:
logger.error('That peer is not in our database')
else:
if action == 'add':
friend.setTrust(1)
logger.info('Added %s as friend.' % (friend.publicKey,))
else:
friend.setTrust(0)
logger.info('Removed %s as friend.' % (friend.publicKey,))
else:
logger.info('Syntax: friend add/remove/list [address]')
def banBlock(self):
try:
ban = sys.argv[2]
except IndexError:
ban = logger.readline('Enter a block hash:')
if self.onionrUtils.validateHash(ban):
if not self.onionrCore._blacklist.inBlacklist(ban):
try:
self.onionrCore._blacklist.addToDB(ban)
self.onionrCore.removeBlock(ban)
except Exception as error:
logger.error('Could not blacklist block', error=error)
else:
logger.info('Block blacklisted')
else:
logger.warn('That block is already blacklisted')
else:
logger.error('Invalid block hash')
return
def listConn(self): def listConn(self):
self.onionrCore.daemonQueueAdd('connectedPeers') self.onionrCore.daemonQueueAdd('connectedPeers')
@ -543,22 +611,39 @@ class Onionr:
Starts the Onionr communication daemon Starts the Onionr communication daemon
''' '''
communicatorDaemon = './communicator2.py' communicatorDaemon = './communicator2.py'
if not os.environ.get("WERKZEUG_RUN_MAIN") == "true":
if self._developmentMode:
logger.warn('DEVELOPMENT MODE ENABLED (THIS IS LESS SECURE!)', timestamp = False)
net = NetController(config.get('client.port', 59496))
logger.info('Tor is starting...')
if not net.startTor():
sys.exit(1)
logger.info('Started .onion service: ' + logger.colors.underline + net.myID)
logger.info('Our Public key: ' + self.onionrCore._crypto.pubKey)
time.sleep(1)
#TODO make runable on windows
subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)])
logger.debug('Started communicator')
events.event('daemon_start', onionr = self)
self.api = api.API(self.debug)
apiThread = Thread(target=api.API, args=(self.debug,))
apiThread.start()
try:
time.sleep(3)
except KeyboardInterrupt:
logger.info('Got keyboard interrupt')
time.sleep(1)
self.onionrUtils.localCommand('shutdown')
else:
if apiThread.isAlive():
if self._developmentMode:
logger.warn('DEVELOPMENT MODE ENABLED (THIS IS LESS SECURE!)', timestamp = False)
net = NetController(config.get('client.port', 59496))
logger.info('Tor is starting...')
if not net.startTor():
sys.exit(1)
logger.info('Started .onion service: ' + logger.colors.underline + net.myID)
logger.info('Our Public key: ' + self.onionrCore._crypto.pubKey)
time.sleep(1)
#TODO make runable on windows
subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)])
# Print nice header thing :)
if config.get('general.display_header', True):
self.header()
logger.debug('Started communicator')
events.event('daemon_start', onionr = self)
try:
while True:
time.sleep(5)
except KeyboardInterrupt:
self.onionrCore.daemonQueueAdd('shutdown')
self.onionrUtils.localCommand('shutdown')
return return
def killDaemon(self): def killDaemon(self):
@ -722,5 +807,12 @@ class Onionr:
print('Opening %s ...' % url) print('Opening %s ...' % url)
webbrowser.open(url, new = 1, autoraise = True) webbrowser.open(url, new = 1, autoraise = True)
def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
if os.path.exists('static-data/header.txt'):
with open('static-data/header.txt', 'rb') as file:
# only to stdout, not file or log or anything
sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', ONIONR_VERSION))
logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
if __name__ == "__main__": if __name__ == "__main__":
Onionr() Onionr()

onionr/onionrblacklist.py (new file, 115 lines)

@ -0,0 +1,115 @@
'''
Onionr - P2P Anonymous Storage Network
This file handles maintenence of a blacklist database, for blocks and peers
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3, os, logger
class OnionrBlackList:
def __init__(self, coreInst):
self.blacklistDB = 'data/blacklist.db'
self._core = coreInst
if not os.path.exists(self.blacklistDB):
self.generateDB()
return
def inBlacklist(self, data):
hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
retData = False
if not hashed.isalnum():
raise Exception("Hashed data is not alpha numeric")
for i in self._dbExecute("select * from blacklist where hash='%s'" % (hashed,)):
retData = True # this only executes if an entry is present by that hash
break
return retData
def _dbExecute(self, toExec):
conn = sqlite3.connect(self.blacklistDB)
c = conn.cursor()
retData = c.execute(toExec)
conn.commit()
return retData
def deleteBeforeDate(self, date):
# TODO, delete blacklist entries before date
return
def deleteExpired(self, dataType=0):
'''Delete expired entries'''
deleteList = []
curTime = self._core._utils.getEpoch()
try:
int(dataType)
except (TypeError, ValueError):
raise TypeError("dataType must be int")
for i in self._dbExecute('select * from blacklist where dataType=%s' % (dataType,)):
if i[1] == dataType:
if (curTime - i[2]) >= i[3]:
deleteList.append(i[0])
for thing in deleteList:
self._dbExecute("delete from blacklist where hash='%s'" % (thing,))
def generateDB(self):
self._dbExecute('''CREATE TABLE blacklist(
hash text primary key not null,
dataType int,
blacklistDate int,
expire int
);
''')
return
def clearDB(self):
self._dbExecute('''delete from blacklist;''')
def getList(self):
data = self._dbExecute('select * from blacklist')
myList = []
for i in data:
myList.append(i[0])
return myList
def addToDB(self, data, dataType=0, expire=0):
'''Add data to the blacklist. Intended for block hashes, block data, peers, or transport addresses
0=block
1=peer
2=pubkey
'''
# we hash the data so we can remove data entirely from our node's disk
hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
if self.inBlacklist(hashed):
return
if not hashed.isalnum():
raise Exception("Hashed data is not alpha numeric")
try:
int(dataType)
except ValueError:
raise Exception("dataType is not int")
try:
int(expire)
except ValueError:
raise Exception("expire is not int")
#TODO check for length sanity
insert = (hashed,)
blacklistDate = self._core._utils.getEpoch()
self._dbExecute("insert into blacklist (hash, dataType, blacklistDate, expire) VALUES('%s', %s, %s, %s);" % (hashed, dataType, blacklistDate, expire))

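For reference, a minimal usage sketch of the new blacklist class follows (illustrative only, not part of the commit); it assumes an initialized core.Core instance that exposes removeBlock(), and uses the dataType scheme from the addToDB docstring (0=block, 1=peer, 2=pubkey):

import onionrblacklist

def drop_and_blacklist_block(core_inst, block_hash):
    '''Hypothetical helper: blacklist a block hash and delete its data.'''
    blacklist = onionrblacklist.OnionrBlackList(core_inst)
    # dataType 0 = block; expire the entry after a day so it is pruned later
    blacklist.addToDB(block_hash, dataType=0, expire=86400)
    if blacklist.inBlacklist(block_hash):
        core_inst.removeBlock(block_hash)  # remove the block data from disk as well
    blacklist.deleteExpired(dataType=0)    # prune entries whose expire window has passed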
View File

@ -1,5 +1,5 @@
''' '''
Onionr - P2P Microblogging Platform & Social network. Onionr - P2P Anonymous Storage Network
This class contains the OnionrBlocks class which is a class for working with Onionr blocks This class contains the OnionrBlocks class which is a class for working with Onionr blocks
''' '''

View File

@ -155,26 +155,6 @@ class OnionrCrypto:
decrypted = anonBox.decrypt(data, encoder=encoding) decrypted = anonBox.decrypt(data, encoder=encoding)
return decrypted return decrypted
def symmetricPeerEncrypt(self, data, peer):
'''Salsa20 encrypt data to peer (with mac)
this function does not accept a key, it is a wrapper for encryption with a peer
'''
key = self._core.getPeerInfo(4)
if type(key) != bytes:
key = self._core.getPeerInfo(2)
encrypted = self.symmetricEncrypt(data, key, encodedKey=True)
return encrypted
def symmetricPeerDecrypt(self, data, peer):
'''Salsa20 decrypt data from peer (with mac)
this function does not accept a key, it is a wrapper for encryption with a peer
'''
key = self._core.getPeerInfo(4)
if type(key) != bytes:
key = self._core.getPeerInfo(2)
decrypted = self.symmetricDecrypt(data, key, encodedKey=True)
return decrypted
def symmetricEncrypt(self, data, key, encodedKey=False, returnEncoded=True): def symmetricEncrypt(self, data, key, encodedKey=False, returnEncoded=True):
'''Encrypt data to a 32-byte key (Salsa20-Poly1305 MAC)''' '''Encrypt data to a 32-byte key (Salsa20-Poly1305 MAC)'''
if encodedKey: if encodedKey:
@ -247,6 +227,10 @@ class OnionrCrypto:
return result return result
def sha3Hash(self, data): def sha3Hash(self, data):
try:
data = data.encode()
except AttributeError:
pass
hasher = hashlib.sha3_256() hasher = hashlib.sha3_256()
hasher.update(data) hasher.update(data)
return hasher.hexdigest() return hasher.hexdigest()

105
onionr/onionrdaemontools.py Normal file
View File

@ -0,0 +1,105 @@
'''
Onionr - P2P Anonymous Storage Network
Contains the DaemonTools class, which provides useful functions for the communicator daemon
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import onionrexceptions, onionrpeers, onionrproofs, base64, logger
from dependencies import secrets
class DaemonTools:
def __init__(self, daemon):
self.daemon = daemon
self.announceCache = {}
def announceNode(self):
'''Announce our node to our peers'''
retData = False
# Announce to random online peers
for i in self.daemon.onlinePeers:
if not i in self.announceCache:
peer = i
break
else:
peer = self.daemon.pickOnlinePeer()
ourID = self.daemon._core.hsAddress.strip()
url = 'http://' + peer + '/public/announce/'
data = {'node': ourID}
combinedNodes = ourID + peer
if peer in self.announceCache:
data['random'] = self.announceCache[peer]
else:
proof = onionrproofs.DataPOW(combinedNodes, forceDifficulty=4)
data['random'] = base64.b64encode(proof.waitForResult()[1])
self.announceCache[peer] = data['random']
logger.info('Announcing node to ' + url)
if self.daemon._core._utils.doPostRequest(url, data) == 'Success':
retData = True
self.daemon.decrementThreadCount('announceNode')
return retData
def netCheck(self):
'''Check if we are connected to the internet or not when we can't connect to any peers'''
if len(self.daemon.onlinePeers) == 0:
if not self.daemon._core._utils.checkNetwork(torPort=self.daemon.proxyPort):
logger.warn('Network check failed, are you connected to the internet?')
self.daemon.isOnline = False
self.daemon.decrementThreadCount('netCheck')
def cleanOldBlocks(self):
'''Delete old blocks if our disk allocation is full/near full'''
while self.daemon._core._utils.storageCounter.isFull():
oldest = self.daemon._core.getBlockList()[0]
self.daemon._core._blacklist.addToDB(oldest)
self.daemon._core.removeBlock(oldest)
logger.info('Deleted block: %s' % (oldest,))
self.daemon.decrementThreadCount('cleanOldBlocks')
def cooldownPeer(self):
'''Randomly add an online peer to cooldown, so we can connect a new one'''
onlinePeerAmount = len(self.daemon.onlinePeers)
minTime = 300
cooldownTime = 600
toCool = ''
tempConnectTimes = dict(self.daemon.connectTimes)
# Remove peers from cooldown that have been there long enough
tempCooldown = dict(self.daemon.cooldownPeer)
for peer in tempCooldown:
if (self.daemon._core._utils.getEpoch() - tempCooldown[peer]) >= cooldownTime:
del self.daemon.cooldownPeer[peer]
# Cool down a peer, if we have max connections alive for long enough
if onlinePeerAmount >= self.daemon._core.config.get('peers.maxConnect'):
finding = True
while finding:
try:
toCool = min(tempConnectTimes, key=tempConnectTimes.get)
if (self.daemon._core._utils.getEpoch() - tempConnectTimes[toCool]) < minTime:
del tempConnectTimes[toCool]
else:
finding = False
except ValueError:
break
else:
self.daemon.removeOnlinePeer(toCool)
self.daemon.cooldownPeer[toCool] = self.daemon._core._utils.getEpoch()
self.daemon.decrementThreadCount('cooldownPeer')

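The cooldownPeer() logic above picks the longest-connected peer once peers.maxConnect is reached, skipping peers that have been connected for less than minTime seconds. A standalone sketch of that selection, assuming connect times are epoch seconds keyed by peer address:

import time

def pick_peer_to_cool(connect_times, max_connect, min_time=300):
    '''Return the longest-connected peer once the connection limit is hit,
    or None if every candidate has been connected for under min_time seconds.'''
    if len(connect_times) < max_connect:
        return None
    candidates = dict(connect_times)  # copy so we can discard too-young peers
    now = time.time()
    while candidates:
        oldest = min(candidates, key=candidates.get)  # earliest connect time
        if now - candidates[oldest] < min_time:
            del candidates[oldest]  # connected too recently, try the next one
        else:
            return oldest
    return None

# Example: 'a' connected ten minutes ago, 'b' one minute ago -> picks 'a'
print(pick_peer_to_cool({'a': time.time() - 600, 'b': time.time() - 60}, max_connect=2))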
View File

@ -1,5 +1,5 @@
''' '''
Onionr - P2P Microblogging Platform & Social network. Onionr - P2P Anonymous Storage Network
This file contains exceptions for onionr This file contains exceptions for onionr
''' '''
@ -34,10 +34,19 @@ class OnlinePeerNeeded(Exception):
class InvalidPubkey(Exception): class InvalidPubkey(Exception):
pass pass
class KeyNotKnown(Exception):
pass
# block exceptions # block exceptions
class InvalidMetadata(Exception): class InvalidMetadata(Exception):
pass pass
class BlacklistedBlock(Exception):
pass
class DataExists(Exception):
pass
class InvalidHexHash(Exception): class InvalidHexHash(Exception):
'''When a string is not a valid hex string of appropriate length for a hash value''' '''When a string is not a valid hex string of appropriate length for a hash value'''
pass pass
@ -52,3 +61,8 @@ class MissingPort(Exception):
class InvalidAddress(Exception): class InvalidAddress(Exception):
pass pass
# file exceptions
class DiskAllocationReached(Exception):
pass

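The new exception types let callers tell a blacklisted block apart from a full disk allocation. A hedged sketch of such a caller (store_block_data is a hypothetical helper; it assumes Core exposes a _blacklist attribute, as the utilities elsewhere in this commit do):

import onionrexceptions, storagecounter

def store_block_data(core_inst, block_hash, block_data):
    '''Hypothetical helper: refuse blacklisted blocks and respect the disk allocation.'''
    if core_inst._blacklist.inBlacklist(block_hash):
        raise onionrexceptions.BlacklistedBlock('%s is blacklisted' % (block_hash,))
    if storagecounter.StorageCounter(core_inst).isFull():
        raise onionrexceptions.DiskAllocationReached
    # ... write block_data to disk and record its size here ...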
View File

@ -1,5 +1,5 @@
''' '''
Onionr - P2P Microblogging Platform & Social network. Onionr - P2P Anonymous Storage Network
This file contains both the PeerProfiles class for network profiling of Onionr nodes This file contains both the PeerProfiles class for network profiling of Onionr nodes
''' '''
@ -17,7 +17,7 @@
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>. along with this program. If not, see <https://www.gnu.org/licenses/>.
''' '''
import core, config, logger import core, config, logger, sqlite3
class PeerProfiles: class PeerProfiles:
''' '''
PeerProfiles PeerProfiles
@ -72,7 +72,7 @@ def getScoreSortedPeerList(coreInst):
return peerList return peerList
def peerCleanup(coreInst): def peerCleanup(coreInst):
'''Removes peers who have been offline too long''' '''Removes peers who have been offline too long or score too low'''
if not isinstance(coreInst, core.Core): if not isinstance(coreInst, core.Core):
raise TypeError('coreInst must be instance of core.Core') raise TypeError('coreInst must be instance of core.Core')
@ -89,4 +89,17 @@ def peerCleanup(coreInst):
# Remove peers that go below the negative score # Remove peers that go below the negative score
if PeerProfiles(address, coreInst).score < minScore: if PeerProfiles(address, coreInst).score < minScore:
coreInst.removeAddress(address) coreInst.removeAddress(address)
try:
if (int(coreInst._utils.getEpoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
expireTime = 600
else:
expireTime = 86400
coreInst._blacklist.addToDB(address, dataType=1, expire=expireTime)
except sqlite3.IntegrityError: #TODO just make sure it's not a unique constraint issue
pass
except ValueError:
pass
logger.warn('Removed address ' + address + '.') logger.warn('Removed address ' + address + '.')
# Unban peers that are probably not malicious (TODO: improve)
coreInst._blacklist.deleteExpired(dataType=1)

75
onionr/onionrusers.py Normal file
View File

@ -0,0 +1,75 @@
'''
Onionr - P2P Anonymous Storage Network
Contains abstractions for interacting with users of Onionr
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import onionrblockapi, logger, onionrexceptions
class OnionrUser:
def __init__(self, coreInst, publicKey):
self.trust = 0
self._core = coreInst
self.publicKey = publicKey
self.trust = self._core.getPeerInfo(self.publicKey, 'trust')
return
def setTrust(self, newTrust):
'''Set the peer's trust level. 0 = not trusted, 1 = friend, 2 = ultimate'''
self._core.setPeerInfo(self.publicKey, 'trust', newTrust)
def isFriend(self):
if self._core.getPeerInfo(self.publicKey, 'trust') == 1:
return True
return False
def getName(self):
retData = 'anonymous'
name = self._core.getPeerInfo(self.publicKey, 'name')
try:
if len(name) > 0:
retData = name
except (TypeError, ValueError):
pass
return retData
def encrypt(self, data):
encrypted = self._core._crypto.pubKeyEncrypt(data, self.publicKey, encodedData=True)
return encrypted
def decrypt(self, data):
decrypted = self._core._crypto.pubKeyDecrypt(data, self.publicKey, encodedData=True)
return decrypted
def forwardEncrypt(self, data):
return
def forwardDecrypt(self, encrypted):
return
def findAndSetID(self):
'''Find any info about the user from existing blocks and cache it to their DB entry'''
infoBlocks = []
for bHash in self._core.getBlocksByType('userInfo'):
block = onionrblockapi.Block(bHash, core=self._core)
if block.signer == self.publicKey:
if block.verifySig():
newName = block.getMetadata('name')
if newName.isalnum():
logger.info('%s is now using the name %s.' % (self.publicKey, self._core._utils.escapeAnsi(newName)))
self._core.setPeerInfo(self.publicKey, 'name', newName)
else:
raise onionrexceptions.InvalidPubkey

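A short sketch of how the new user abstraction is meant to be driven (illustrative only; core_inst is assumed to be an initialized core.Core and pubkey a peer key already in the database):

import onionrusers

def trust_and_greet(core_inst, pubkey):
    user = onionrusers.OnionrUser(core_inst, pubkey)
    user.findAndSetID()      # cache any signed 'userInfo' name block for this key
    if not user.isFriend():
        user.setTrust(1)     # 1 = friend, per the setTrust docstring
    name = user.getName()    # falls back to 'anonymous' if no name is known
    return user.encrypt('hello %s' % (name,))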
View File

@ -23,7 +23,7 @@ import nacl.signing, nacl.encoding
from onionrblockapi import Block from onionrblockapi import Block
import onionrexceptions import onionrexceptions
from defusedxml import minidom from defusedxml import minidom
import pgpwords import pgpwords, onionrusers, storagecounter
if sys.version_info < (3, 6): if sys.version_info < (3, 6):
try: try:
import sha3 import sha3
@ -40,10 +40,10 @@ class OnionrUtils:
self._core = coreInstance self._core = coreInstance
self.timingToken = '' self.timingToken = ''
self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions
self.peerProcessing = {} # dict of current peer actions: peer, actionList self.peerProcessing = {} # dict of current peer actions: peer, actionList
self.storageCounter = storagecounter.StorageCounter(self._core)
config.reload()
return return
def getTimeBypassToken(self): def getTimeBypassToken(self):
@ -95,8 +95,10 @@ class OnionrUtils:
except IndexError: except IndexError:
logger.warn('No pow token') logger.warn('No pow token')
continue continue
#powHash = self._core._crypto.blake2bHash(base64.b64decode(key[1]) + self._core._crypto.blake2bHash(key[0].encode())) try:
value = base64.b64decode(key[1]) value = base64.b64decode(key[1])
except binascii.Error:
continue
hashedKey = self._core._crypto.blake2bHash(key[0]) hashedKey = self._core._crypto.blake2bHash(key[0])
powHash = self._core._crypto.blake2bHash(value + hashedKey) powHash = self._core._crypto.blake2bHash(value + hashedKey)
try: try:
@ -106,6 +108,7 @@ class OnionrUtils:
if powHash.startswith(b'0000'): if powHash.startswith(b'0000'):
if not key[0] in self._core.listPeers(randomOrder=False) and type(key) != None and key[0] != self._core._crypto.pubKey: if not key[0] in self._core.listPeers(randomOrder=False) and type(key) != None and key[0] != self._core._crypto.pubKey:
if self._core.addPeer(key[0], key[1]): if self._core.addPeer(key[0], key[1]):
onionrusers.OnionrUser(self._core, key[0]).findAndSetID()
retVal = True retVal = True
else: else:
logger.warn("Failed to add key") logger.warn("Failed to add key")
@ -126,10 +129,17 @@ class OnionrUtils:
retVal = False retVal = False
if newAdderList != False: if newAdderList != False:
for adder in newAdderList.split(','): for adder in newAdderList.split(','):
if not adder in self._core.listAdders(randomOrder = False) and adder.strip() != self.getMyAddress(): adder = adder.strip()
if not adder in self._core.listAdders(randomOrder = False) and adder != self.getMyAddress() and not self._core._blacklist.inBlacklist(adder):
if not config.get('tor.v3onions') and len(adder) == 62:
continue
if self._core.addAddress(adder): if self._core.addAddress(adder):
logger.info('Added %s to db.' % adder, timestamp = True) # Check if we have the maximum number of allowed stored peers
retVal = True if config.get('peers.maxStoredPeers') > len(self._core.listAdders()):
logger.info('Added %s to db.' % adder, timestamp = True)
retVal = True
else:
logger.warn('Reached the maximum number of peers allowed in the address database by your config.')
else: else:
pass pass
#logger.debug('%s is either our address or already in our DB' % adder) #logger.debug('%s is either our address or already in our DB' % adder)
@ -261,9 +271,24 @@ class OnionrUtils:
if myBlock.isEncrypted: if myBlock.isEncrypted:
myBlock.decrypt() myBlock.decrypt()
blockType = myBlock.getMetadata('type') # we would use myBlock.getType() here, but it is bugged with encrypted blocks blockType = myBlock.getMetadata('type') # we would use myBlock.getType() here, but it is bugged with encrypted blocks
signer = self.bytesToStr(myBlock.signer)
try: try:
if len(blockType) <= 10: if len(blockType) <= 10:
self._core.updateBlockInfo(blockHash, 'dataType', blockType) self._core.updateBlockInfo(blockHash, 'dataType', blockType)
if blockType == 'userInfo':
if myBlock.verifySig():
peerName = myBlock.getMetadata('name')
try:
if len(peerName) > 20:
raise onionrexceptions.InvalidMetadata('Peer name specified is too large')
except TypeError:
pass
except onionrexceptions.InvalidMetadata:
pass
else:
self._core.setPeerInfo(signer, 'name', peerName)
logger.info('%s is now using the name %s.' % (signer, self.escapeAnsi(peerName)))
except TypeError: except TypeError:
pass pass
@ -333,7 +358,7 @@ class OnionrUtils:
return retVal return retVal
def validateMetadata(self, metadata): def validateMetadata(self, metadata, blockData):
'''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string''' '''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
# TODO, make this check sane sizes # TODO, make this check sane sizes
retData = False retData = False
@ -363,7 +388,20 @@ class OnionrUtils:
break break
else: else:
# if metadata loop gets no errors, it does not break, therefore metadata is valid # if metadata loop gets no errors, it does not break, therefore metadata is valid
retData = True # make sure we do not have another block with the same data content (prevent data duplication and replay attacks)
nonce = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(blockData))
try:
with open(self._core.dataNonceFile, 'r') as nonceFile:
if nonce in nonceFile.read():
retData = False # we've seen that nonce before, so we can't pass metadata
raise onionrexceptions.DataExists
except FileNotFoundError:
retData = True
except onionrexceptions.DataExists:
# do not set retData to True, because nonce has been seen before
pass
else:
retData = True
else: else:
logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object') logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object')
@ -553,6 +591,7 @@ class OnionrUtils:
''' '''
Do a get request through a local tor or i2p instance Do a get request through a local tor or i2p instance
''' '''
retData = False
if proxyType == 'tor': if proxyType == 'tor':
if port == 0: if port == 0:
raise onionrexceptions.MissingPort('Socks port required for Tor HTTP get request') raise onionrexceptions.MissingPort('Socks port required for Tor HTTP get request')
@ -597,6 +636,35 @@ class OnionrUtils:
self.powSalt = retData self.powSalt = retData
return retData return retData
def strToBytes(self, data):
try:
data = data.encode()
except AttributeError:
pass
return data
def bytesToStr(self, data):
try:
data = data.decode()
except AttributeError:
pass
return data
def checkNetwork(self, torPort=0):
'''Check if we are connected to the internet (through Tor)'''
retData = False
connectURLs = []
try:
with open('static-data/connect-check.txt', 'r') as connectTest:
connectURLs = connectTest.read().split(',')
for url in connectURLs:
if self.doGetRequest(url, port=torPort) != False:
retData = True
break
except FileNotFoundError:
pass
return retData
def size(path='.'): def size(path='.'):
''' '''
Returns the size of a folder's contents in bytes Returns the size of a folder's contents in bytes

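The nonce check added to validateMetadata() above rejects any block whose payload hash has been seen before, which prevents replays of identical data. A standalone sketch of the same idea (the file path and helper name are stand-ins, not Onionr's API):

import hashlib, os

NONCE_FILE = 'data/block-nonces.dat'  # stand-in for core.dataNonceFile

def seen_before(block_data):
    '''Return True if an identical payload (bytes) was already accepted.'''
    nonce = hashlib.sha3_256(block_data).hexdigest()
    if os.path.exists(NONCE_FILE):
        with open(NONCE_FILE, 'r') as nonce_file:
            if nonce in nonce_file.read():
                return True
    with open(NONCE_FILE, 'a') as nonce_file:
        nonce_file.write(nonce + '\n')  # remember the payload for next time
    return False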
View File

@ -0,0 +1,5 @@
{
"name" : "cliui",
"version" : "1.0",
"author" : "onionr"
}

View File

@ -0,0 +1,133 @@
'''
Onionr - P2P Anonymous Storage Network
This is an interactive menu-driven CLI interface for Onionr
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
# Imports some useful libraries
import logger, config, threading, time, uuid, subprocess
from onionrblockapi import Block
plugin_name = 'cliui'
PLUGIN_VERSION = '0.0.1'
class OnionrCLIUI:
def __init__(self, apiInst):
self.api = apiInst
self.myCore = apiInst.get_core()
return
def subCommand(self, command):
try:
subprocess.run(["./onionr.py", command])
except KeyboardInterrupt:
pass
def refresh(self):
for i in range(100):
print('')
def start(self):
'''Main CLI UI interface menu'''
showMenu = True
isOnline = "No"
firstRun = True
if self.myCore._utils.localCommand('ping') == 'pong':
firstRun = False
while showMenu:
if firstRun:
print("please wait while Onionr starts...")
daemon = subprocess.Popen(["./onionr.py", "start"], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
time.sleep(30)
firstRun = False
if self.myCore._utils.localCommand('ping') == 'pong':
isOnline = "Yes"
else:
isOnline = "No"
print('''
Daemon Running: ''' + isOnline + '''
1. Flow (Anonymous public chat, use at your own risk)
2. Mail (Secure email-like service)
3. File Sharing
4. User Settings
5. Start/Stop Daemon
6. Quit (Does not shut down the daemon)
''')
try:
choice = input(">").strip().lower()
except (KeyboardInterrupt, EOFError):
choice = "quit"
if choice in ("flow", "1"):
self.subCommand("flow")
elif choice in ("2", "mail"):
self.subCommand("mail")
elif choice in ("3", "file sharing", "file"):
print("Not supported yet")
elif choice in ("4", "user settings", "settings"):
try:
self.setName()
except (KeyboardInterrupt, EOFError) as e:
pass
elif choice in ("5", "daemon"):
if isOnline == "Yes":
print("Onionr daemon will shutdown...")
#self.myCore._utils.localCommand("shutdown")
self.myCore.daemonQueueAdd('shutdown')
try:
daemon.kill()
except UnboundLocalError:
pass
else:
print("Starting Daemon...")
daemon = subprocess.Popen(["./onionr.py", "start"], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
elif choice in ("6", "quit"):
showMenu = False
elif choice == "":
pass
else:
print("Invalid choice")
return
def setName(self):
try:
name = input("Enter your name: ")
if name != "":
self.myCore.insertBlock("userInfo-" + str(uuid.uuid1()), sign=True, header='userInfo', meta={'name': name})
except KeyboardInterrupt:
pass
return
def on_init(api, data = None):
'''
This event is called after Onionr is initialized, but before the entered
command is executed. It may be called when the daemon is starting or when
just the client is running.
'''
# Doing this makes it so that the other functions can access the api object
# by simply referencing the variable `pluginapi`.
pluginapi = api
ui = OnionrCLIUI(api)
api.commands.register('interactive', ui.start)
api.commands.register_help('interactive', 'Open the CLI interface')
return

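The plugin's only required entry point is on_init(), which registers its command with the plugin API. A minimal skeleton following the same pattern (the plugin name and command here are examples, not part of the commit):

plugin_name = 'example'
PLUGIN_VERSION = '0.0.1'

def say_hello():
    print('hello from the %s plugin' % (plugin_name,))

def on_init(api, data = None):
    # Called once Onionr has initialized; register a CLI command and its help text
    api.commands.register('hello', say_hello)
    api.commands.register_help('hello', 'Print a friendly greeting')
    return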
View File

@ -21,7 +21,9 @@
# Imports some useful libraries # Imports some useful libraries
import logger, config, threading, time, readline, datetime import logger, config, threading, time, readline, datetime
from onionrblockapi import Block from onionrblockapi import Block
import onionrexceptions import onionrexceptions, onionrusers
import locale
locale.setlocale(locale.LC_ALL, '')
plugin_name = 'pms' plugin_name = 'pms'
PLUGIN_VERSION = '0.0.1' PLUGIN_VERSION = '0.0.1'
@ -79,8 +81,19 @@ class OnionrMail:
continue continue
blockCount += 1 blockCount += 1
pmBlockMap[blockCount] = blockHash pmBlockMap[blockCount] = blockHash
block = pmBlocks[blockHash]
senderKey = block.signer
try:
senderKey = senderKey.decode()
except AttributeError:
pass
senderDisplay = onionrusers.OnionrUser(self.myCore, senderKey).getName()
if senderDisplay == 'anonymous':
senderDisplay = senderKey
blockDate = pmBlocks[blockHash].getDate().strftime("%m/%d %H:%M") blockDate = pmBlocks[blockHash].getDate().strftime("%m/%d %H:%M")
print('%s. %s: %s' % (blockCount, blockDate, blockHash)) print('%s. %s - %s: %s' % (blockCount, blockDate, senderDisplay[:12], blockHash))
try: try:
choice = logger.readline('Enter a block number, -r to refresh, or -q to stop: ').strip().lower() choice = logger.readline('Enter a block number, -r to refresh, or -q to stop: ').strip().lower()
@ -106,15 +119,15 @@ class OnionrMail:
except KeyError: except KeyError:
pass pass
else: else:
cancel = ''
readBlock.verifySig() readBlock.verifySig()
print('Message received from', readBlock.signer) print('Message received from %s' % (readBlock.signer,))
print('Valid signature:', readBlock.validSig) print('Valid signature:', readBlock.validSig)
if not readBlock.validSig: if not readBlock.validSig:
logger.warn('This message has an INVALID signature. Anyone could have sent this message.') logger.warn('This message has an INVALID signature. ANYONE could have sent this message.')
logger.readline('Press enter to continue to message.') cancel = logger.readline('Press enter to continue to message, or -q to not open the message (recommended).')
if cancel != '-q':
print(draw_border(self.myCore._utils.escapeAnsi(readBlock.bcontent.decode().strip()))) print(draw_border(self.myCore._utils.escapeAnsi(readBlock.bcontent.decode().strip())))
return return
def draftMessage(self): def draftMessage(self):

View File

@ -41,7 +41,7 @@
}, },
"tor" : { "tor" : {
"v3onions": false
}, },
"i2p":{ "i2p":{
@ -51,14 +51,18 @@
}, },
"allocations":{ "allocations":{
"disk": 9000000000, "disk": 10000000000,
"netTotal": 1000000000, "netTotal": 1000000000,
"blockCache" : 5000000, "blockCache": 5000000,
"blockCacheTotal" : 50000000 "blockCacheTotal": 50000000
}, },
"peers":{ "peers":{
"minimumScore": -4000, "minimumScore": -100,
"maxStoredPeers": 100, "maxStoredPeers": 5000,
"maxConnect": 3 "maxConnect": 10
},
"timers":{
"lookupBlocks": 25,
"getBlocks": 30
} }
} }

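The new keys are read elsewhere in this commit through config.get(), optionally with a default. A short sketch of how a caller might read them (the values noted are simply the new defaults above):

import config

config.reload()
max_stored = config.get('peers.maxStoredPeers')          # 5000 after this change
max_connect = config.get('peers.maxConnect')             # 10 concurrent peers
lookup_interval = config.get('timers.lookupBlocks', 25)  # seconds, with a fallback
use_v3 = config.get('tor.v3onions')                      # False: skip v3 .onion addresses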
View File

@ -1,6 +1,6 @@
<h1>This is an Onionr Node</h1> <h1>This is an Onionr Node</h1>
<p>The content on this server is not necessarily created by the server owner, and was not necessarily stored with the owner's knowledge.</p> <p>The content on this server is not necessarily created by the server owner, and was not necessarily stored specifically with the owner's knowledge of its contents.</p>
<p>Onionr is a decentralized, distributed data storage system, that anyone can insert data into.</p> <p>Onionr is a decentralized, distributed data storage system, that anyone can insert data into.</p>

61
onionr/storagecounter.py Normal file
View File

@ -0,0 +1,61 @@
'''
Onionr - P2P Microblogging Platform & Social network.
Keeps track of how much disk space we're using
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import config
class StorageCounter:
def __init__(self, coreInst):
self._core = coreInst
self.dataFile = self._core.usageFile
return
def isFull(self):
retData = False
if self._core.config.get('allocations.disk') <= (self.getAmount() + 1000):
retData = True
return retData
def _update(self, data):
with open(self.dataFile, 'w') as dataFile:
dataFile.write(str(data))
def getAmount(self):
'''Return how much disk space we're using (according to record)'''
retData = 0
try:
with open(self.dataFile, 'r') as dataFile:
retData = int(dataFile.read())
except FileNotFoundError:
pass
return retData
def addBytes(self, amount):
'''Record that we are now using more disk space, unless doing so would exceed configured max'''
newAmount = amount + self.getAmount()
retData = newAmount
if newAmount > self._core.config.get('allocations.disk'):
retData = False
else:
self._update(newAmount)
return retData
def removeBytes(self, amount):
'''Record that we are now using less disk space'''
newAmount = self.getAmount() - amount
self._update(newAmount)
return newAmount

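A hedged sketch of how the counter is intended to wrap block writes (the file path and helper names are stand-ins, not Onionr's actual write path):

import storagecounter, onionrexceptions

def write_block_file(core_inst, block_hash, data):
    counter = storagecounter.StorageCounter(core_inst)
    if counter.addBytes(len(data)) is False:
        # addBytes() returns False rather than exceeding allocations.disk
        raise onionrexceptions.DiskAllocationReached
    with open('data/blocks/%s.dat' % (block_hash,), 'wb') as block_file:
        block_file.write(data)

def remove_block_file(core_inst, size_in_bytes):
    # Give the space back in the usage record after deleting a block
    storagecounter.StorageCounter(core_inst).removeBytes(size_in_bytes)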
View File

@ -5,29 +5,39 @@
Anonymous P2P platform, using Tor & I2P. Anonymous P2P platform, using Tor & I2P.
Major work in progress. ***Experimental, not safe or easy to use yet***
***THIS SOFTWARE IS NOT USABLE OR SECURE YET.*** <hr>
**The main repo for this software is at https://gitlab.com/beardog/Onionr/** **The main repo for this software is at https://gitlab.com/beardog/Onionr/**
**Roadmap/features:**
# Summary
Onionr is a decentralized, peer-to-peer data storage network, designed to be anonymous and resistant to (meta)data analysis and spam.
Onionr can be used for mail, as a social network, instant messenger, file sharing software, or for encrypted group discussion.
# Roadmap/features
Check the [Gitlab Project](https://gitlab.com/beardog/Onionr/milestones/1) to see progress towards the alpha release. Check the [Gitlab Project](https://gitlab.com/beardog/Onionr/milestones/1) to see progress towards the alpha release.
## Core internal features
* [X] Fully p2p/decentralized, no trackers or other single points of failure * [X] Fully p2p/decentralized, no trackers or other single points of failure
* [X] High level of anonymity * [X] End to end encryption of user data
* [ ] End to end encryption where applicable
* [X] Optional non-encrypted blocks, useful for blog posts or public file sharing * [X] Optional non-encrypted blocks, useful for blog posts or public file sharing
* [ ] Easy API system for integration to websites * [X] Easy API system for integration to websites
* [ ] Metadata analysis resistance (being improved)
# Development
This software is in heavy development. If for some reason you want to get involved, get in touch first. ## Other features
**Onionr API and functionality is subject to non-backwards compatible change during development** **Onionr API and functionality is subject to non-backwards compatible change during pre-alpha development**
# Donate ## Help out
Everyone is welcome to help out. Please get in touch first if you are making non-trivial changes. If you can't help with programming, you can write documentation or guides.
Bitcoin/Bitcoin Cash: 1onion55FXzm6h8KQw3zFw2igpHcV7LPq Bitcoin/Bitcoin Cash: 1onion55FXzm6h8KQw3zFw2igpHcV7LPq
@ -36,5 +46,3 @@ Bitcoin/Bitcoin Cash: 1onion55FXzm6h8KQw3zFw2igpHcV7LPq
The Tor Project, I2P developers, and anyone else do not own, create, or endorse this project, and are not otherwise involved. The Tor Project, I2P developers, and anyone else do not own, create, or endorse this project, and are not otherwise involved.
The badges (besides the travis-ci build badge) are by Maik Ellerbrock and are licensed under a Creative Commons Attribution 4.0 International License. The badges (besides the travis-ci build badge) are by Maik Ellerbrock and are licensed under a Creative Commons Attribution 4.0 International License.
The onion in the Onionr logo is adapted from [this](https://commons.wikimedia.org/wiki/File:Red_Onion_on_White.JPG) image by Colin on Wikimedia under a Creative Commons Attribution-Share Alike 3.0 Unported license. The Onionr logo is under the same license.

View File

@ -1,7 +1,7 @@
urllib3==1.23 urllib3==1.23
requests==2.18.4 requests==2.18.4
PyNaCl==1.2.1 PyNaCl==1.2.1
gevent==1.2.2 gevent==1.3.6
sha3==0.2.1 sha3==0.2.1
defusedxml==0.5.0 defusedxml==0.5.0
simple_crypt==4.1.7 simple_crypt==4.1.7