diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..4a0c253e
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,4 @@
+onionr/data/**/*
+onionr/data
+RUN-WINDOWS.bat
+MY-RUN.sh
diff --git a/.gitignore b/.gitignore
index 6edc23ff..26e43b0e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@ onionr/data-encrypted.dat
onionr/.onionr-lock
core
.vscode/*
+venv/*
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index f2ab3397..00000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "onionr/bitpeer"]
- path = onionr/bitpeer
- url = https://github.com/beardog108/bitpeer.py
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..e6132726
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,28 @@
+FROM ubuntu:bionic
+
+#Base settings
+ENV HOME /root
+
+#Install needed packages
+RUN apt update && apt install -y python3 python3-dev python3-pip tor locales nano sqlite3
+
+RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
+ locale-gen
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+WORKDIR /srv/
+ADD ./requirements.txt /srv/requirements.txt
+RUN pip3 install -r requirements.txt
+
+WORKDIR /root/
+#Add Onionr source
+COPY . /root/
+VOLUME /root/data/
+
+#Set startup command
+CMD bash
+
+#Expose ports
+EXPOSE 8080
diff --git a/Makefile b/Makefile
index c51fc72b..13d9c0f9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,5 @@
+PREFIX = /usr/local
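+# DESTDIR supports staged installs, e.g.: make install DESTDIR=/tmp/stage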
+
.DEFAULT_GOAL := setup
setup:
@@ -5,16 +7,15 @@ setup:
-@cd onionr/static-data/ui/; ./compile.py
install:
- sudo rm -rf /usr/share/onionr/
- sudo rm -f /usr/bin/onionr
- sudo cp -rp ./onionr /usr/share/onionr
- sudo sh -c "echo \"#!/bin/sh\ncd /usr/share/onionr/\n./onionr.py \\\"\\\$$@\\\"\" > /usr/bin/onionr"
- sudo chmod +x /usr/bin/onionr
- sudo chown -R `whoami` /usr/share/onionr/
+ cp -rfp ./onionr $(DESTDIR)$(PREFIX)/share/onionr
+ echo '#!/bin/sh' > $(DESTDIR)$(PREFIX)/bin/onionr
+ echo 'cd $(DESTDIR)$(PREFIX)/share/onionr' >> $(DESTDIR)$(PREFIX)/bin/onionr
+ echo './onionr.py "$$@"' >> $(DESTDIR)$(PREFIX)/bin/onionr
+ chmod +x $(DESTDIR)$(PREFIX)/bin/onionr
uninstall:
- sudo rm -rf /usr/share/onionr
- sudo rm -f /usr/bin/onionr
+ rm -rf $(DESTDIR)$(PREFIX)/share/onionr
+ rm -f $(DESTDIR)$(PREFIX)/bin/onionr
test:
@./RUN-LINUX.sh stop
@@ -27,7 +28,7 @@ test:
soft-reset:
@echo "Soft-resetting Onionr..."
- rm -f onionr/data/blocks/*.dat onionr/data/*.db | true > /dev/null 2>&1
+ rm -f onionr/data/blocks/*.dat onionr/data/*.db onionr/data/block-nonces.dat > /dev/null 2>&1 || true
@./RUN-LINUX.sh version | grep -v "Failed" --color=always
reset:
diff --git a/docs/whitepaper.md b/docs/whitepaper.md
index 789f8e47..e791b83c 100644
--- a/docs/whitepaper.md
+++ b/docs/whitepaper.md
@@ -86,6 +86,12 @@ Blocks are stored indefinitely until the allocated space is filled, at which poi
## Block Timestamping
-Onionr can provide evidence when a block was inserted by requesting other users to sign a hash of the current time with the block data hash: sha3_256(time + sha3_256(block data)).
+Onionr can provide evidence of when a block was inserted by requesting other users to sign a hash of the current time with the block data hash: sha3_256(time + sha3_256(block data)).
-This can be done either by the creator of the block prior to generation, or by any node after insertion.
\ No newline at end of file
+This can be done either by the creator of the block prior to generation, or by any node after insertion.
+
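+A minimal, non-normative sketch of this construction (in Python, assuming the standard `hashlib`; names and encodings are illustrative):
+
+```python
+import hashlib
+import time
+
+def timestamp_digest(block_data: bytes) -> str:
+    # sha3_256(time + sha3_256(block data)) -- the value peers are asked to sign
+    inner = hashlib.sha3_256(block_data).hexdigest()
+    return hashlib.sha3_256((str(int(time.time())) + inner).encode()).hexdigest()
+```
+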
+In addition, randomness beacons such as the one operated by [NIST](https://beacon.nist.gov/home) or the hash of the latest blocks in a cryptocurrency network could be used to affirm that a block was at least not *created* before a given time.
+
+# Direct Connections
+
+We propose a system to
\ No newline at end of file
diff --git a/onionr/api.py b/onionr/api.py
index a36e6001..625e083c 100755
--- a/onionr/api.py
+++ b/onionr/api.py
@@ -20,11 +20,11 @@
import flask
from flask import request, Response, abort, send_from_directory
from multiprocessing import Process
-from gevent.wsgi import WSGIServer
+from gevent.pywsgi import WSGIServer
import sys, random, threading, hmac, hashlib, base64, time, math, os, json
from core import Core
from onionrblockapi import Block
-import onionrutils, onionrcrypto, blockimporter, onionrevents as events, logger, config
+import onionrutils, onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config
class API:
'''
@@ -114,9 +114,7 @@ class API:
'''
Simply define the request as not having yet failed, before every request.
'''
-
self.requestFailed = False
-
return
@app.after_request
@@ -236,16 +234,6 @@ class API:
resp = Response('Goodbye')
elif action == 'ping':
resp = Response('pong')
- elif action == 'site':
- block = data
- siteData = self._core.getData(data)
- response = 'not found'
- if siteData != '' and siteData != False:
- self.mimeType = 'text/html'
- response = siteData.split(b'-', 2)[-1]
- resp = Response(response)
- elif action == 'info':
- resp = Response(json.dumps({'pubkey' : self._core._crypto.pubKey, 'host' : self._core.hsAdder}))
elif action == "insertBlock":
response = {'success' : False, 'reason' : 'An unknown error occurred'}
@@ -394,13 +382,57 @@ class API:
pass
else:
if sys.getsizeof(data) < 100000000:
- if blockimporter.importBlockFromData(data, self._core):
- resp = 'success'
- else:
- logger.warn('Error encountered importing uploaded block')
+ try:
+ if blockimporter.importBlockFromData(data, self._core):
+ resp = 'success'
+ else:
+ logger.warn('Error encountered importing uploaded block')
+ except onionrexceptions.BlacklistedBlock:
+ logger.debug('uploaded block is blacklisted')
+ pass
resp = Response(resp)
return resp
+
+ @app.route('/public/announce/', methods=['POST'])
+ def acceptAnnounce():
+ self.validateHost('public')
+ resp = 'failure'
+ powHash = ''
+ randomData = ''
+ newNode = ''
+ ourAdder = self._core.hsAddress.encode()
+ try:
+ newNode = request.form['node'].encode()
+ except KeyError:
+ logger.warn('No node specified for announce')
+ pass
+ else:
+ try:
+ randomData = request.form['random']
+ randomData = base64.b64decode(randomData)
+ except KeyError:
+ logger.warn('No random data specified for announce')
+ else:
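+ # require a client puzzle: blake2b(random + blake2b(node + our address)) must start with '0000'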
+ nodes = newNode + self._core.hsAddress.encode()
+ nodes = self._core._crypto.blake2bHash(nodes)
+ powHash = self._core._crypto.blake2bHash(randomData + nodes)
+ try:
+ powHash = powHash.decode()
+ except AttributeError:
+ pass
+ if powHash.startswith('0000'):
+ try:
+ newNode = newNode.decode()
+ except AttributeError:
+ pass
+ if self._core.addAddress(newNode):
+ resp = 'Success'
+ else:
+ logger.warn(newNode.decode() + ' failed to meet POW: ' + powHash)
+ resp = Response(resp)
+ return resp
+
@app.route('/public/')
def public_handler():
# Public means it is publicly network accessible
@@ -425,20 +457,11 @@ class API:
resp = Response(self._utils.getBlockDBHash())
elif action == 'getBlockHashes':
resp = Response('\n'.join(self._core.getBlockList()))
- elif action == 'announce':
- if data != '':
- # TODO: require POW for this
- if self._core.addAddress(data):
- resp = Response('Success')
- else:
- resp = Response('')
- else:
- resp = Response('')
# setData should be something the communicator initiates, not this api
elif action == 'getData':
resp = ''
if self._utils.validateHash(data):
- if not os.path.exists('data/blocks/' + data + '.db'):
+ if os.path.exists('data/blocks/' + data + '.dat'):
block = Block(hash=data.encode(), core=self._core)
resp = base64.b64encode(block.getRaw().encode()).decode()
if len(resp) == 0:
@@ -472,7 +495,6 @@ class API:
def authFail(err):
self.requestFailed = True
resp = Response("403")
-
return resp
@app.errorhandler(401)
@@ -485,11 +507,13 @@ class API:
logger.info('Starting client on ' + self.host + ':' + str(bindPort) + '...', timestamp=False)
try:
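+ # wait for the Tor hidden service address; it may not exist yet on first start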
+ while len(self._core.hsAddress) == 0:
+ self._core.refreshFirstStartVars()
+ time.sleep(0.5)
self.http_server = WSGIServer((self.host, bindPort), app)
self.http_server.serve_forever()
except KeyboardInterrupt:
pass
- #app.run(host=self.host, port=bindPort, debug=False, threaded=True)
except Exception as e:
logger.error(str(e))
logger.fatal('Failed to start client on ' + self.host + ':' + str(bindPort) + ', exiting...')
diff --git a/onionr/blockimporter.py b/onionr/blockimporter.py
index a2695093..ce1cd1fe 100644
--- a/onionr/blockimporter.py
+++ b/onionr/blockimporter.py
@@ -20,6 +20,12 @@
import core, onionrexceptions, logger
def importBlockFromData(content, coreInst):
retData = False
+
+ dataHash = coreInst._crypto.sha3Hash(content)
+
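+ # refuse to import data whose hash is locally blacklisted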
+ if coreInst._blacklist.inBlacklist(dataHash):
+ raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,))
+
if not isinstance(coreInst, core.Core):
raise Exception("coreInst must be an Onionr core instance")
@@ -30,11 +36,15 @@ def importBlockFromData(content, coreInst):
metas = coreInst._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
- if coreInst._utils.validateMetadata(metadata): # check if metadata is valid
+ if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid
if coreInst._crypto.verifyPow(content): # check if POW is enough/correct
logger.info('Block passed proof, saving.')
- blockHash = coreInst.setData(content)
- blockHash = coreInst.addToBlockDB(blockHash, dataSaved=True)
- coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
- retData = True
+ try:
+ blockHash = coreInst.setData(content)
+ except onionrexceptions.DiskAllocationReached:
+ pass
+ else:
+ coreInst.addToBlockDB(blockHash, dataSaved=True)
+ coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+ retData = True
return retData
\ No newline at end of file
diff --git a/onionr/communicator2.py b/onionr/communicator2.py
index b641b76e..f203dd70 100755
--- a/onionr/communicator2.py
+++ b/onionr/communicator2.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
'''
- Onionr - P2P Microblogging Platform & Social network.
+ Onionr - P2P Anonymous Storage Network
This file contains both the OnionrCommunicate class for communicating with peers
and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue)
@@ -21,11 +21,14 @@
'''
import sys, os, core, config, json, requests, time, logger, threading, base64, onionr
import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
+import onionrdaemontools
from defusedxml import minidom
class OnionrCommunicatorDaemon:
def __init__(self, debug, developmentMode):
+ self.isOnline = True # Assume we're connected to the internet
+
# list of timer instances
self.timers = []
@@ -48,6 +51,8 @@ class OnionrCommunicatorDaemon:
# lists of connected peers and peers we know we can't reach currently
self.onlinePeers = []
self.offlinePeers = []
+ self.cooldownPeer = {}
+ self.connectTimes = {}
self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances)
# amount of threads running by name, used to prevent too many
@@ -69,28 +74,34 @@ class OnionrCommunicatorDaemon:
# Loads in and starts the enabled plugins
plugins.reload()
+ # daemon tools are misc daemon functions, e.g. announce to online peers
+ # intended only for use by OnionrCommunicatorDaemon
+ self.daemonTools = onionrdaemontools.DaemonTools(self)
+
if debug or developmentMode:
OnionrCommunicatorTimers(self, self.heartbeat, 10)
- # Print nice header thing :)
- if config.get('general.display_header', True) and not self.shutdown:
- self.header()
-
# Set timers, function reference, seconds
# requiresPeer True means the timer function won't fire if we have no connected peers
OnionrCommunicatorTimers(self, self.daemonCommands, 5)
OnionrCommunicatorTimers(self, self.detectAPICrash, 5)
- peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60)
- OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1)
- OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True)
+ peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60, maxThreads=1)
+ OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks'), requiresPeer=True, maxThreads=1)
+ OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks'), requiresPeer=True)
OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
+ OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
+ OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
+ netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
+ announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1)
cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
# set loop to execute instantly to load up peer pool (replaced old pool init wait)
peerPoolTimer.count = (peerPoolTimer.frequency - 1)
cleanupTimer.count = (cleanupTimer.frequency - 60)
+ announceTimer.count = (announceTimer.frequency - 60)
# Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking
try:
@@ -105,14 +116,14 @@ class OnionrCommunicatorDaemon:
pass
logger.info('Goodbye.')
- self._core._utils.localCommand('shutdown')
+ self._core._utils.localCommand('shutdown') # shutdown the api
time.sleep(0.5)
def lookupKeys(self):
'''Lookup new keys'''
logger.debug('Looking up new keys...')
tryAmount = 1
- for i in range(tryAmount):
+ for i in range(tryAmount): # amount of times to ask peers for new keys
# Download new key list from random online peers
peer = self.pickOnlinePeer()
newKeys = self.peerAction(peer, action='kex')
@@ -139,6 +150,12 @@ class OnionrCommunicatorDaemon:
existingBlocks = self._core.getBlockList()
triedPeers = [] # list of peers we've tried this time around
for i in range(tryAmount):
+ # check if disk allocation is used
+ if not self.isOnline:
+ break
+ if self._core._utils.storageCounter.isFull():
+ logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
+ break
peer = self.pickOnlinePeer() # select random online peer
# if we've already tried all the online peers this time around, stop
if peer in triedPeers:
@@ -153,7 +170,7 @@ class OnionrCommunicatorDaemon:
if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
self._core.setAddressInfo(peer, 'DBHash', newDBHash)
try:
- newBlocks = self.peerAction(peer, 'getBlockHashes')
+ newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes
except Exception as error:
logger.warn("could not get new blocks with " + peer, error=error)
newBlocks = False
@@ -164,20 +181,31 @@ class OnionrCommunicatorDaemon:
# if newline separated string is valid hash
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
- if i not in self.blockQueue:
- self.blockQueue.append(i)
+ if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i):
+ self.blockQueue.append(i) # add blocks to download queue
self.decrementThreadCount('lookupBlocks')
return
def getBlocks(self):
'''download new blocks in queue'''
for blockHash in self.blockQueue:
- if self.shutdown:
+ removeFromQueue = True
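+ # keep the hash queued when a failure is recoverable (e.g. disk allocation reached)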
+ if self.shutdown or not self.isOnline:
+ # Exit loop if shutting down or offline
break
+ # Do not download blocks being downloaded or that are already saved (edge cases)
if blockHash in self.currentDownloading:
logger.debug('ALREADY DOWNLOADING ' + blockHash)
continue
- self.currentDownloading.append(blockHash)
+ if blockHash in self._core.getBlockList():
+ logger.debug('%s is already saved' % (blockHash,))
+ self.blockQueue.remove(blockHash)
+ continue
+ if self._core._blacklist.inBlacklist(blockHash):
+ continue
+ if self._core._utils.storageCounter.isFull():
+ break
+ self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
logger.info("Attempting to download %s..." % blockHash)
peerUsed = self.pickOnlinePeer()
content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata)
@@ -197,16 +225,25 @@ class OnionrCommunicatorDaemon:
metas = self._core._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
#meta = metas[1]
- if self._core._utils.validateMetadata(metadata): # check if metadata is valid
+ if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
if self._core._crypto.verifyPow(content): # check if POW is enough/correct
- logger.info('Block passed proof, saving.')
- self._core.setData(content)
- self._core.addToBlockDB(blockHash, dataSaved=True)
- self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+ logger.info('Block passed proof, attempting save.')
+ try:
+ self._core.setData(content)
+ except onionrexceptions.DiskAllocationReached:
+ logger.error("Reached disk allocation allowance, cannot save this block.")
+ removeFromQueue = False
+ else:
+ self._core.addToBlockDB(blockHash, dataSaved=True)
+ self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
else:
logger.warn('POW failed for block ' + blockHash)
else:
- logger.warn('Metadata for ' + blockHash + ' is invalid.')
+ if self._core._blacklist.inBlacklist(realHash):
+ logger.warn('%s is blacklisted' % (realHash,))
+ else:
+ logger.warn('Metadata for ' + blockHash + ' is invalid.')
+ self._core._blacklist.addToDB(blockHash)
else:
# if block didn't meet expected hash
tempHash = self._core._crypto.sha3Hash(content) # lazy hack, TODO use var
@@ -217,7 +254,8 @@ class OnionrCommunicatorDaemon:
# Punish peer for sharing invalid block (not always malicious, but is bad regardless)
onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
- self.blockQueue.remove(blockHash) # remove from block queue both if success or false
+ if removeFromQueue:
+ self.blockQueue.remove(blockHash) # remove from block queue both if success or false
self.currentDownloading.remove(blockHash)
self.decrementThreadCount('getBlocks')
return
@@ -260,7 +298,7 @@ class OnionrCommunicatorDaemon:
'''Manages the self.onlinePeers attribute list, connects to more peers if we have none connected'''
logger.info('Refreshing peer pool.')
- maxPeers = 6
+ maxPeers = int(config.get('peers.maxConnect'))
needed = maxPeers - len(self.onlinePeers)
for i in range(needed):
@@ -278,8 +316,9 @@ class OnionrCommunicatorDaemon:
def addBootstrapListToPeerList(self, peerList):
'''Add the bootstrap list to the peer list (no duplicates)'''
for i in self._core.bootstrapList:
- if i not in peerList and i not in self.offlinePeers and i != self._core.hsAdder:
+ if i not in peerList and i not in self.offlinePeers and i != self._core.hsAddress:
peerList.append(i)
+ self._core.addAddress(i)
def connectNewPeer(self, peer='', useBootstrap=False):
'''Adds a new random online peer to self.onlinePeers'''
@@ -300,7 +339,9 @@ class OnionrCommunicatorDaemon:
self.addBootstrapListToPeerList(peerList)
for address in peerList:
- if len(address) == 0 or address in tried or address in self.onlinePeers:
+ if not config.get('tor.v3onions') and len(address) == 62:
+ continue
+ if len(address) == 0 or address in tried or address in self.onlinePeers or address in self.cooldownPeer:
continue
if self.shutdown:
return
@@ -309,6 +350,7 @@ class OnionrCommunicatorDaemon:
time.sleep(0.1)
if address not in self.onlinePeers:
self.onlinePeers.append(address)
+ self.connectTimes[address] = self._core._utils.getEpoch()
retData = address
# add peer to profile list if they're not in it
@@ -323,6 +365,17 @@ class OnionrCommunicatorDaemon:
logger.debug('Failed to connect to ' + address)
return retData
+ def removeOnlinePeer(self, peer):
+ '''Remove an online peer'''
+ try:
+ del self.connectTimes[peer]
+ except KeyError:
+ pass
+ try:
+ self.onlinePeers.remove(peer)
+ except ValueError:
+ pass
+
def peerCleanup(self):
'''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
onionrpeers.peerCleanup(self._core)
@@ -354,8 +407,9 @@ class OnionrCommunicatorDaemon:
if retData == False:
try:
self.getPeerProfileInstance(peer).addScore(-10)
- self.onlinePeers.remove(peer)
- self.getOnlinePeers() # Will only add a new peer to pool if needed
+ self.removeOnlinePeer(peer)
+ if action != 'ping':
+ self.getOnlinePeers() # Will only add a new peer to pool if needed
except ValueError:
pass
else:
@@ -437,17 +491,10 @@ class OnionrCommunicatorDaemon:
def announce(self, peer):
'''Announce to peers our address'''
- announceCount = 0
- announceAmount = 2
- for peer in self.onlinePeers:
- announceCount += 1
- if self.peerAction(peer, 'announce', self._core.hsAdder) == 'Success':
- logger.info('Successfully introduced node to ' + peer)
- break
- else:
- if announceCount == announceAmount:
- logger.warn('Could not introduce node. Try again soon')
- break
+ if self.daemonTools.announceNode():
+ logger.info('Successfully introduced node to ' + peer)
+ else:
+ logger.warn('Could not introduce node.')
def detectAPICrash(self):
'''exit if the api server crashes/stops'''
@@ -463,13 +510,6 @@ class OnionrCommunicatorDaemon:
self.shutdown = True
self.decrementThreadCount('detectAPICrash')
- def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
- if os.path.exists('static-data/header.txt'):
- with open('static-data/header.txt', 'rb') as file:
- # only to stdout, not file or log or anything
- sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION))
- logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n')
-
class OnionrCommunicatorTimers:
def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False):
self.timerFunction = timerFunction
diff --git a/onionr/core.py b/onionr/core.py
index eb45e182..ab8b640b 100644
--- a/onionr/core.py
+++ b/onionr/core.py
@@ -1,5 +1,5 @@
'''
- Onionr - P2P Microblogging Platform & Social network
+ Onionr - P2P Anonymous Storage Network
Core Onionr library, useful for external programs. Handles peer & data processing
'''
@@ -21,7 +21,8 @@ import sqlite3, os, sys, time, math, base64, tarfile, getpass, simplecrypt, hash
from onionrblockapi import Block
import onionrutils, onionrcrypto, onionrproofs, onionrevents as events, onionrexceptions, onionrvalues
-
+import onionrblacklist
+import dbcreator
if sys.version_info < (3, 6):
try:
import sha3
@@ -40,13 +41,18 @@ class Core:
self.blockDB = 'data/blocks.db'
self.blockDataLocation = 'data/blocks/'
self.addressDB = 'data/address.db'
- self.hsAdder = ''
+ self.hsAddress = ''
self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
self.bootstrapList = []
self.requirements = onionrvalues.OnionrValues()
self.torPort = torPort
+ self.dataNonceFile = 'data/block-nonces.dat'
+ self.dbCreate = dbcreator.DBCreator(self)
self.usageFile = 'data/disk-usage.txt'
+ self.config = config
+
+ self.maxBlockSize = 10000000 # max block size in bytes
if not os.path.exists('data/'):
os.mkdir('data/')
@@ -57,7 +63,7 @@ class Core:
if os.path.exists('data/hs/hostname'):
with open('data/hs/hostname', 'r') as hs:
- self.hsAdder = hs.read().strip()
+ self.hsAddress = hs.read().strip()
# Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation):
@@ -71,6 +77,7 @@ class Core:
self._utils = onionrutils.OnionrUtils(self)
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
+ self._blacklist = onionrblacklist.OnionrBlackList(self)
except Exception as error:
logger.error('Failed to initialize core Onionr library.', error=error)
@@ -78,6 +85,12 @@ class Core:
sys.exit(1)
return
+ def refreshFirstStartVars(self):
+ '''Hack to refresh some vars which may not be set on first start'''
+ if os.path.exists('data/hs/hostname'):
+ with open('data/hs/hostname', 'r') as hs:
+ self.hsAddress = hs.read().strip()
+
def addPeer(self, peerID, powID, name=''):
'''
Adds a public key to the key database (misleading function name)
@@ -92,7 +105,7 @@ class Core:
conn = sqlite3.connect(self.peerDB)
hashID = self._crypto.pubKeyHashID(peerID)
c = conn.cursor()
- t = (peerID, name, 'unknown', hashID, powID)
+ t = (peerID, name, 'unknown', hashID, powID, 0)
for i in c.execute("SELECT * FROM PEERS where id = '" + peerID + "';"):
try:
@@ -103,7 +116,7 @@ class Core:
pass
except IndexError:
pass
- c.execute('INSERT INTO peers (id, name, dateSeen, pow, hashID) VALUES(?, ?, ?, ?, ?);', t)
+ c.execute('INSERT INTO peers (id, name, dateSeen, pow, hashID, trust) VALUES(?, ?, ?, ?, ?, ?);', t)
conn.commit()
conn.close()
@@ -125,7 +138,6 @@ class Core:
for i in c.execute("SELECT * FROM adders where address = '" + address + "';"):
try:
if i[0] == address:
- logger.warn('Not adding existing address')
conn.close()
return False
except ValueError:
@@ -158,14 +170,15 @@ class Core:
conn.close()
events.event('address_remove', data = {'address': address}, onionr = None)
-
return True
else:
return False
def removeBlock(self, block):
'''
- remove a block from this node
+ remove a block from this node (does not automatically blacklist)
+
+ **You may also want to blacklist the hash: _blacklist.addToDB(blockHash)**
'''
if self._utils.validateHash(block):
conn = sqlite3.connect(self.blockDB)
@@ -174,97 +187,36 @@ class Core:
c.execute('Delete from hashes where hash=?;', t)
conn.commit()
conn.close()
+ blockFile = 'data/blocks/' + block + '.dat'
+ dataSize = 0
try:
- os.remove('data/blocks/' + block + '.dat')
+ ''' Get size of data when loaded as an object/var, rather than on disk,
+ to avoid conflict with getsizeof when saving blocks
+ '''
+ with open(blockFile, 'r') as data:
+ dataSize = sys.getsizeof(data.read())
+ self._utils.storageCounter.removeBytes(dataSize)
+ os.remove(blockFile)
except FileNotFoundError:
pass
def createAddressDB(self):
'''
Generate the address database
-
- types:
- 1: I2P b32 address
- 2: Tor v2 (like facebookcorewwwi.onion)
- 3: Tor v3
'''
- conn = sqlite3.connect(self.addressDB)
- c = conn.cursor()
- c.execute('''CREATE TABLE adders(
- address text,
- type int,
- knownPeer text,
- speed int,
- success int,
- DBHash text,
- powValue text,
- failure int,
- lastConnect int,
- lastConnectAttempt int,
- trust int
- );
- ''')
- conn.commit()
- conn.close()
+ self.dbCreate.createAddressDB()
def createPeerDB(self):
'''
Generate the peer sqlite3 database and populate it with the peers table.
'''
- # generate the peer database
- conn = sqlite3.connect(self.peerDB)
- c = conn.cursor()
- c.execute('''CREATE TABLE peers(
- ID text not null,
- name text,
- adders text,
- blockDBHash text,
- forwardKey text,
- dateSeen not null,
- bytesStored int,
- trust int,
- pubkeyExchanged int,
- hashID text,
- pow text not null);
- ''')
- conn.commit()
- conn.close()
- return
+ self.dbCreate.createPeerDB()
def createBlockDB(self):
'''
Create a database for blocks
-
- hash - the hash of a block
- dateReceived - the date the block was recieved, not necessarily when it was created
- decrypted - if we can successfully decrypt the block (does not describe its current state)
- dataType - data type of the block
- dataFound - if the data has been found for the block
- dataSaved - if the data has been saved for the block
- sig - optional signature by the author (not optional if author is specified)
- author - multi-round partial sha3-256 hash of authors public key
- dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
'''
- if os.path.exists(self.blockDB):
- raise Exception("Block database already exists")
- conn = sqlite3.connect(self.blockDB)
- c = conn.cursor()
- c.execute('''CREATE TABLE hashes(
- hash text not null,
- dateReceived int,
- decrypted int,
- dataType text,
- dataFound int,
- dataSaved int,
- sig text,
- author text,
- dateClaimed int
- );
- ''')
- conn.commit()
- conn.close()
-
- return
+ self.dbCreate.createBlockDB()
def addToBlockDB(self, newHash, selfInsert=False, dataSaved=False):
'''
@@ -304,16 +256,26 @@ class Core:
return data
- def setData(self, data):
- '''
- Set the data assciated with a hash
- '''
- data = data
+ def _getSha3Hash(self, data):
hasher = hashlib.sha3_256()
if not type(data) is bytes:
data = data.encode()
hasher.update(data)
dataHash = hasher.hexdigest()
+ return dataHash
+
+ def setData(self, data):
+ '''
+ Set the data associated with a hash
+ '''
+ dataSize = sys.getsizeof(data)
+
+ if not type(data) is bytes:
+ data = data.encode()
+
+ dataHash = self._getSha3Hash(data)
+
if type(dataHash) is bytes:
dataHash = dataHash.decode()
blockFileName = self.blockDataLocation + dataHash + '.dat'
@@ -321,15 +283,19 @@ class Core:
pass # TODO: properly check if block is already saved elsewhere
#raise Exception("Data is already set for " + dataHash)
else:
- blockFile = open(blockFileName, 'wb')
- blockFile.write(data)
- blockFile.close()
-
- conn = sqlite3.connect(self.blockDB)
- c = conn.cursor()
- c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
- conn.commit()
- conn.close()
+ if self._utils.storageCounter.addBytes(dataSize) != False:
+ blockFile = open(blockFileName, 'wb')
+ blockFile.write(data)
+ blockFile.close()
+ conn = sqlite3.connect(self.blockDB)
+ c = conn.cursor()
+ c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
+ conn.commit()
+ conn.close()
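+ # record the hash as a used nonce so identical data cannot be inserted again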
+ with open(self.dataNonceFile, 'a') as nonceFile:
+ nonceFile.write(dataHash + '\n')
+ else:
+ raise onionrexceptions.DiskAllocationReached
return dataHash
@@ -411,18 +377,22 @@ class Core:
'''
Add a command to the daemon queue, used by the communication daemon (communicator.py)
'''
+ retData = True
# Intended to be used by the web server
date = self._utils.getEpoch()
conn = sqlite3.connect(self.queueDB)
c = conn.cursor()
t = (command, data, date)
- c.execute('INSERT INTO commands (command, data, date) VALUES(?, ?, ?)', t)
- conn.commit()
- conn.close()
-
+ try:
+ c.execute('INSERT INTO commands (command, data, date) VALUES(?, ?, ?)', t)
+ conn.commit()
+ conn.close()
+ except sqlite3.OperationalError:
+ retData = False
+ self.daemonQueue()
events.event('queue_push', data = {'command': command, 'data': data}, onionr = None)
- return
+ return retData
def clearDaemonQueue(self):
'''
@@ -456,19 +426,23 @@ class Core:
conn.close()
return addressList
- def listPeers(self, randomOrder=True, getPow=False):
+ def listPeers(self, randomOrder=True, getPow=False, trust=0):
'''
Return a list of public keys (misleading function name)
randomOrder determines if the list should be in a random order
+ trust sets the minimum trust to list
'''
conn = sqlite3.connect(self.peerDB)
c = conn.cursor()
payload = ""
+ if trust not in (0, 1, 2):
+ logger.error('Tried to select invalid trust.')
+ return
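+ # trust was validated above, so interpolating it into the query is safe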
if randomOrder:
- payload = 'SELECT * FROM peers ORDER BY RANDOM();'
+ payload = 'SELECT * FROM peers where trust >= %s ORDER BY RANDOM();' % (trust,)
else:
- payload = 'SELECT * FROM peers;'
+ payload = 'SELECT * FROM peers where trust >= %s;' % (trust,)
peerList = []
for i in c.execute(payload):
try:
@@ -592,7 +566,7 @@ class Core:
if unsaved:
execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();'
else:
- execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;'
+ execute = 'SELECT hash FROM hashes ORDER BY dateReceived ASC;'
rows = list()
for row in c.execute(execute):
for i in row:
@@ -677,6 +651,18 @@ class Core:
'''
retData = False
+ # check nonce
+ dataNonce = self._utils.bytesToStr(self._crypto.sha3Hash(data))
+ try:
+ with open(self.dataNonceFile, 'r') as nonces:
+ if dataNonce in nonces.read().split('\n'):
+ return retData
+ except FileNotFoundError:
+ pass
+ # record nonce
+ with open(self.dataNonceFile, 'a') as nonceFile:
+ nonceFile.write(dataNonce + '\n')
+
if meta is None:
meta = dict()
@@ -688,6 +674,7 @@ class Core:
signature = ''
signer = ''
metadata = {}
+ # metadata is full block metadata, meta is internal, user specified metadata
# only use header if not set in provided meta
if not header is None:
@@ -735,7 +722,7 @@ class Core:
metadata['sig'] = signature
metadata['signer'] = signer
metadata['time'] = str(self._utils.getEpoch())
-
+
# send block data (and metadata) to POW module to get tokenized block data
proof = onionrproofs.POW(metadata, data)
payload = proof.waitForResult()
diff --git a/onionr/dbcreator.py b/onionr/dbcreator.py
new file mode 100644
index 00000000..5f3d2c79
--- /dev/null
+++ b/onionr/dbcreator.py
@@ -0,0 +1,108 @@
+'''
+ Onionr - P2P Anonymous Data Storage & Sharing
+
+ DBCreator, creates sqlite3 databases used by Onionr
+'''
+'''
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''
diff --git a/onionr/static-data/index.html b/onionr/static-data/index.html
--- a/onionr/static-data/index.html
+++ b/onionr/static-data/index.html
-The content on this server is not necessarily created by the server owner, and was not necessarily stored with the owner's knowledge.
+The content on this server is not necessarily created by the server owner, and was not necessarily stored specifically with the owner's knowledge of its contents.
 Onionr is a decentralized, distributed data storage system, that anyone can insert data into.
diff --git a/onionr/storagecounter.py b/onionr/storagecounter.py
new file mode 100644
index 00000000..4468dacc
--- /dev/null
+++ b/onionr/storagecounter.py
@@ -0,0 +1,61 @@
+'''
+ Onionr - P2P Microblogging Platform & Social network.
+
+ Keeps track of how much disk space we're using
+'''
+'''
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''