diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..4a0c253e --- /dev/null +++ b/.dockerignore @@ -0,0 +1,4 @@ +onionr/data/**/* +onionr/data +RUN-WINDOWS.bat +MY-RUN.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..292dfb14 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,6 @@ +test: + script: + - apt-get update -qy + - apt-get install -y python3-dev python3-pip tor + - pip3 install -r requirements.txt + - make test \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..c83de87d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +FROM ubuntu:bionic + +#Base settings +ENV HOME /root + +#Install needed packages +RUN apt update && apt install -y python3 python3-dev python3-pip tor locales nano + +RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ + locale-gen +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +WORKDIR /srv/ +ADD ./requirements.txt /srv/requirements.txt +RUN pip3 install -r requirements.txt + +WORKDIR /root/ +#Add Onionr source +COPY . 
/root/ +VOLUME /root/data/ + +#Set upstart command +CMD bash + +#Expose ports +EXPOSE 8080 diff --git a/Makefile b/Makefile index 23b32ccc..13d9c0f9 100644 --- a/Makefile +++ b/Makefile @@ -1,32 +1,34 @@ +PREFIX = /usr/local + .DEFAULT_GOAL := setup setup: sudo pip3 install -r requirements.txt + -@cd onionr/static-data/ui/; ./compile.py install: - sudo rm -rf /usr/share/onionr/ - sudo rm -f /usr/bin/onionr - sudo cp -rp ./onionr /usr/share/onionr - sudo sh -c "echo \"#!/bin/sh\ncd /usr/share/onionr/\n./onionr.py \\\"\\\$$@\\\"\" > /usr/bin/onionr" - sudo chmod +x /usr/bin/onionr - sudo chown -R `whoami` /usr/share/onionr/ + cp -rfp ./onionr $(DESTDIR)$(PREFIX)/share/onionr + echo '#!/bin/sh' > $(DESTDIR)$(PREFIX)/bin/onionr + echo 'cd $(DESTDIR)$(PREFIX)/share/onionr' > $(DESTDIR)$(PREFIX)/bin/onionr + echo './onionr "$$@"' > $(DESTDIR)$(PREFIX)/bin/onionr + chmod +x $(DESTDIR)$(PREFIX)/bin/onionr uninstall: - sudo rm -rf /usr/share/onionr - sudo rm -f /usr/bin/onionr + rm -rf $(DESTDIR)$(PREFIX)/share/onionr + rm -f $(DESTDIR)$(PREFIX)/bin/onionr test: @./RUN-LINUX.sh stop @sleep 1 @rm -rf onionr/data-backup @mv onionr/data onionr/data-backup | true > /dev/null 2>&1 - -@cd onionr; ./tests.py; ./cryptotests.py; + -@cd onionr; ./tests.py; @rm -rf onionr/data @mv onionr/data-backup onionr/data | true > /dev/null 2>&1 soft-reset: @echo "Soft-resetting Onionr..." 
- rm -f onionr/data/blocks/*.dat onionr/data/*.db | true > /dev/null 2>&1 + rm -f onionr/data/blocks/*.dat onionr/data/*.db onionr/data/block-nonces.dat | true > /dev/null 2>&1 @./RUN-LINUX.sh version | grep -v "Failed" --color=always reset: diff --git a/docs/api.md b/docs/api.md index 7f9128a5..52a55368 100644 --- a/docs/api.md +++ b/docs/api.md @@ -1,34 +1,2 @@ -BLOCK HEADERS (simple ID system to identify block type) ------------------------------------------------ --crypt- (encrypted block) --bin- (binary file) --txt- (plaintext) - HTTP API ------------------------------------------------- -/client/ (Private info, not publicly accessible) - -- hello - - hello world -- shutdown - - exit onionr -- stats - - show node stats - -/public/ - -- firstConnect - - initialize with peer -- ping - - pong -- setHMAC - - set a created symmetric key -- getDBHash - - get the hash of the current hash database state -- getPGP - - export node's PGP public key -- getData - - get a data block -- getBlockHashes - - get a list of the node's hashes -------------------------------------------------- +TODO diff --git a/docs/onionr-draft.md b/docs/onionr-draft.md deleted file mode 100644 index acce39e7..00000000 --- a/docs/onionr-draft.md +++ /dev/null @@ -1,57 +0,0 @@ -# Onionr Protocol Spec v2 - -A P2P platform for Tor & I2P - -# Overview - -Onionr is an encrypted microblogging & mailing system designed in the spirit of Twitter. -There are no central servers and all traffic is peer to peer by default (routed via Tor or I2P). -User IDs are simply Tor onion service/I2P host id + Ed25519 key fingerprint. -Private blocks are only able to be read by the intended peer. -All traffic is over Tor/I2P, connecting only to Tor onion and I2P hidden services. 
- -## Goals: - • Selective sharing of information - • Secure & semi-anonymous direct messaging - • Forward secrecy - • Defense in depth - • Data should be secure for years to come - • Decentralization - * Avoid browser-based exploits that plague similar software - * Avoid timing attacks & unexpected metadata leaks - -## Protocol - -Onionr nodes use HTTP (over Tor/I2P) to exchange keys, metadata, and blocks. Blocks are identified by their sha3_256 hash. Nodes sync a table of blocks hashes and attempt to download blocks they do not yet have from random peers. - -Blocks may be encrypted using Curve25519 or Salsa20. - -Blocks have IDs in the following format: - --Optional hash of public key of publisher (base64)-optional signature (non-optional if publisher is specified) (Base64)-block type-block hash(sha3-256) - -pubkeyHash-signature-type-hash - -## Connections - -When a node first comes online, it attempts to bootstrap using a default list provided by a client. -When two peers connect, they exchange Ed25519 keys (if applicable) then Salsa20 keys. - -Salsa20 keys are regenerated either every X many communications with a peer or every X minutes. - -Every 100kb or every 2 hours is a recommended default. - -All valid requests with HMAC should be recorded until used HMAC's expiry to prevent replay attacks. -Peer Types - * Friends: - * Encrypted ‘friends only’ posts to one another - * Usually less strict rate & storage limits - * Strangers: - * Used for storage of encrypted or public information - * Can only read public posts - * Usually stricter rate & storage limits - -## Spam mitigation - -To send or receive data, a node can optionally request that the other node generate a hash that when in hexadecimal representation contains a random string at a random location in the string. Clients will configure what difficulty to request, and what difficulty is acceptable for themselves to perform. Difficulty should correlate with recent network & disk usage and data size. 
Friends can be configured to have less strict (to non existent) limits, separately from strangers. (proof of work). -Rate limits can be strict, as Onionr is not intended to be an instant messaging application. \ No newline at end of file diff --git a/docs/onionr-logo.png b/docs/onionr-logo.png index d0860f6d..b6c3c9b5 100644 Binary files a/docs/onionr-logo.png and b/docs/onionr-logo.png differ diff --git a/docs/onionr-web.png b/docs/onionr-web.png new file mode 100644 index 00000000..3124253d Binary files /dev/null and b/docs/onionr-web.png differ diff --git a/docs/whitepaper.md b/docs/whitepaper.md new file mode 100644 index 00000000..e791b83c --- /dev/null +++ b/docs/whitepaper.md @@ -0,0 +1,97 @@ +

+ <h1>Onionr</h1> +

+

Anonymous, Decentralized, Distributed Network

+ +# Introduction + +The most important thing in the modern world is information. The ability to communicate freely with others. The internet has provided humanity with the ability to spread information globally, but there are many people who try (and sometimes succeed) to stifle the flow of information. + +Internet censorship comes in many forms, state censorship, corporate consolidation of media, threats of violence, network exploitation (e.g. denial of service attacks). + +To prevent censorship or loss of information, these measures must be in place: + +* Resistance to censorship of underlying infrastructure or of network hosts + +* Anonymization of users by default + * The Inability to violently coerce human users (personal threats/"doxxing", or totalitarian regime censorship) + +* Economic availability. A system should not rely on a single device to be constantly online, and should not be overly expensive to use. The majority of people in the world own cell phones, but comparatively few own personal computers, particularly in developing countries. + +There are many great projects that tackle decentralization and privacy issues, but there are none which tackle all of the above issues. Some of the existing networks have also not worked well in practice, or are more complicated than they need to be. + +# Onionr Design Goals + +When designing Onionr we had these goals in mind: + +* Anonymous Blocks + + * Difficult to determine block creator or users regardless of transport used +* Default Anonymous Transport Layer + * Tor and I2P +* Transport agnosticism +* Default global sync, but can configure what blocks to seed +* Spam resistance +* Encrypted blocks + +# Onionr Design + +(See the spec for specific details) + +## General Overview + +At its core, Onionr is merely a description for storing data in self-verifying packages ("blocks"). These blocks can be encrypted to a user (or self), encrypted symmetrically, or not at all. 
+Public keys can be generated deterministically with a password using a key derivation function (Argon2id). This password can be shared between many users in order to share data anonymously among a group, using only 1 password. This is useful in some cases, but is risky, as if one user causes the key to be compromised and does not notify the group or revoke the key, there is no way to know.
+ +### Block Encryption + +For encryption, Onionr uses ephemeral Curve25519 keys for key exchange and XSalsa20-Poly1305 as a symmetric cipher, or optionally using only XSalsa20-Poly1305 with a pre-shared key. + +Regardless of encryption, blocks can be signed internally using Ed25519. + +## Block Exchange + +Blocks can be exchanged using any method, as they are not reliant on any other blocks. + +By default, every node shares a list of the blocks it is sharing, and will download any blocks it does not yet have. + +## Spam mitigation and block storage time + +By default, an Onionr node adjusts the target difficulty for blocks to be accepted based on the percent of disk usage allocated to Onionr. + +Blocks are stored indefinitely until the allocated space is filled, at which point Onionr will remove the oldest blocks as needed, save for "pinned" blocks, which are permanently stored. + +## Block Timestamping + +Onionr can provide evidence of when a block was inserted by requesting other users to sign a hash of the current time with the block data hash: sha3_256(time + sha3_256(block data)). + +This can be done either by the creator of the block prior to generation, or by any node after insertion. + +In addition, randomness beacons such as the one operated by [NIST](https://beacon.nist.gov/home) or the hash of the latest blocks in a cryptocurrency network could be used to affirm that a block was at least not *created* before a given time. + +# Direct Connections + +We propose a system to \ No newline at end of file diff --git a/onionr/api.py b/onionr/api.py index bf592c59..d0407388 100755 --- a/onionr/api.py +++ b/onionr/api.py @@ -18,18 +18,21 @@ along with this program. If not, see . 
''' import flask -from flask import request, Response, abort +from flask import request, Response, abort, send_from_directory from multiprocessing import Process from gevent.wsgi import WSGIServer -import sys, random, threading, hmac, hashlib, base64, time, math, os, logger, config +import sys, random, threading, hmac, hashlib, base64, time, math, os, json from core import Core from onionrblockapi import Block -import onionrutils, onionrcrypto +import onionrutils, onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config class API: ''' Main HTTP API (Flask) ''' + + callbacks = {'public' : {}, 'private' : {}} + def validateToken(self, token): ''' Validate that the client token matches the given token @@ -42,6 +45,30 @@ class API: except TypeError: return False + def guessMime(path): + ''' + Guesses the mime type from the input filename + ''' + + mimetypes = { + 'html' : 'text/html', + 'js' : 'application/javascript', + 'css' : 'text/css', + 'png' : 'image/png', + 'jpg' : 'image/jpeg' + } + + for mimetype in mimetypes: + logger.debug(path + ' endswith .' + mimetype + '?') + if path.endswith('.%s' % mimetype): + logger.debug('- True!') + return mimetypes[mimetype] + else: + logger.debug('- no') + + logger.debug('%s not in %s' % (path, mimetypes)) + return 'text/plain' + def __init__(self, debug): ''' Initialize the api server, preping variables for later use @@ -73,6 +100,7 @@ class API: self.i2pEnabled = config.get('i2p.host', False) self.mimeType = 'text/plain' + self.overrideCSP = False with open('data/time-bypass.txt', 'w') as bypass: bypass.write(self.timeBypassToken) @@ -92,7 +120,6 @@ class API: Simply define the request as not having yet failed, before every request. 
''' self.requestFailed = False - return @app.after_request @@ -102,17 +129,85 @@ class API: #else: # resp.headers['server'] = 'Onionr' resp.headers['Content-Type'] = self.mimeType - resp.headers["Content-Security-Policy"] = "default-src 'none'; script-src 'none'; object-src 'none'; style-src data: 'unsafe-inline'; img-src data:; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src 'none'" + if not self.overrideCSP: + resp.headers["Content-Security-Policy"] = "default-src 'none'; script-src 'none'; object-src 'none'; style-src data: 'unsafe-inline'; img-src data:; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src 'none'" resp.headers['X-Frame-Options'] = 'deny' resp.headers['X-Content-Type-Options'] = "nosniff" resp.headers['server'] = 'Onionr' # reset to text/plain to help prevent browser attacks - if self.mimeType != 'text/plain': - self.mimeType = 'text/plain' + self.mimeType = 'text/plain' + self.overrideCSP = False return resp + @app.route('/www/private/') + def www_private(path): + startTime = math.floor(time.time()) + + if request.args.get('timingToken') is None: + timingToken = '' + else: + timingToken = request.args.get('timingToken') + + if not config.get("www.private.run", True): + abort(403) + + self.validateHost('private') + + endTime = math.floor(time.time()) + elapsed = endTime - startTime + + if not hmac.compare_digest(timingToken, self.timeBypassToken): + if elapsed < self._privateDelayTime: + time.sleep(self._privateDelayTime - elapsed) + + return send_from_directory('static-data/www/private/', path) + + @app.route('/www/public/') + def www_public(path): + if not config.get("www.public.run", True): + abort(403) + + self.validateHost('public') + + return send_from_directory('static-data/www/public/', path) + + @app.route('/ui/') + def ui_private(path): + startTime = math.floor(time.time()) + + ''' + if request.args.get('timingToken') is None: + timingToken = '' + else: + timingToken = request.args.get('timingToken') + ''' 
+ + if not config.get("www.ui.run", True): + abort(403) + + if config.get("www.ui.private", True): + self.validateHost('private') + else: + self.validateHost('public') + + ''' + endTime = math.floor(time.time()) + elapsed = endTime - startTime + + if not hmac.compare_digest(timingToken, self.timeBypassToken): + if elapsed < self._privateDelayTime: + time.sleep(self._privateDelayTime - elapsed) + ''' + + logger.debug('Serving %s' % path) + + self.mimeType = API.guessMime(path) + self.overrideCSP = True + + return send_from_directory('static-data/www/ui/dist/', path, mimetype = API.guessMime(path)) + @app.route('/client/') def private_handler(): if request.args.get('timingToken') is None: @@ -132,6 +227,9 @@ class API: if not self.validateToken(token): abort(403) + + events.event('webapi_private', onionr = None, data = {'action' : action, 'data' : data, 'timingToken' : timingToken, 'token' : token}) + self.validateHost('private') if action == 'hello': resp = Response('Hello, World! ' + request.host) @@ -141,17 +239,120 @@ class API: resp = Response('Goodbye') elif action == 'ping': resp = Response('pong') - elif action == 'stats': - resp = Response('me_irl') - raise Exception - elif action == 'site': - block = data - siteData = self._core.getData(data) - response = 'not found' - if siteData != '' and siteData != False: - self.mimeType = 'text/html' - response = siteData.split(b'-', 2)[-1] - resp = Response(response) + elif action == "insertBlock": + response = {'success' : False, 'reason' : 'An unknown error occurred'} + + if not ((data is None) or (len(str(data).strip()) == 0)): + try: + decoded = json.loads(data) + + block = Block() + + sign = False + + for key in decoded: + val = decoded[key] + + key = key.lower() + + if key == 'type': + block.setType(val) + elif key in ['body', 'content']: + block.setContent(val) + elif key == 'parent': + block.setParent(val) + elif key == 'sign': + sign = (str(val).lower() == 'true') + + hash = block.save(sign = sign) + + if not 
hash is False: + response['success'] = True + response['hash'] = hash + response['reason'] = 'Successfully wrote block to file' + else: + response['reason'] = 'Failed to save the block' + except Exception as e: + logger.warn('insertBlock api request failed', error = e) + logger.debug('Here\'s the request: %s' % data) + else: + response = {'success' : False, 'reason' : 'Missing `data` parameter.', 'blocks' : {}} + + resp = Response(json.dumps(response)) + elif action == 'searchBlocks': + response = {'success' : False, 'reason' : 'An unknown error occurred', 'blocks' : {}} + + if not ((data is None) or (len(str(data).strip()) == 0)): + try: + decoded = json.loads(data) + + type = None + signer = None + signed = None + parent = None + reverse = False + limit = None + + for key in decoded: + val = decoded[key] + + key = key.lower() + + if key == 'type': + type = str(val) + elif key == 'signer': + if isinstance(val, list): + signer = val + else: + signer = str(val) + elif key == 'signed': + signed = (str(val).lower() == 'true') + elif key == 'parent': + parent = str(val) + elif key == 'reverse': + reverse = (str(val).lower() == 'true') + elif key == 'limit': + limit = 10000 + + if val is None: + val = limit + + limit = min(limit, int(val)) + + blockObjects = Block.getBlocks(type = type, signer = signer, signed = signed, parent = parent, reverse = reverse, limit = limit) + + logger.debug('%s results for query %s' % (len(blockObjects), decoded)) + + blocks = list() + + for block in blockObjects: + blocks.append({ + 'hash' : block.getHash(), + 'type' : block.getType(), + 'content' : block.getContent(), + 'signature' : block.getSignature(), + 'signedData' : block.getSignedData(), + 'signed' : block.isSigned(), + 'valid' : block.isValid(), + 'date' : (int(block.getDate().strftime("%s")) if not block.getDate() is None else None), + 'parent' : (block.getParent().getHash() if not block.getParent() is None else None), + 'metadata' : block.getMetadata(), + 'header' : 
block.getHeader() + }) + + response['success'] = True + response['blocks'] = blocks + response['reason'] = 'Success' + except Exception as e: + logger.warn('searchBlock api request failed', error = e) + logger.debug('Here\'s the request: %s' % data) + else: + response = {'success' : False, 'reason' : 'Missing `data` parameter.', 'blocks' : {}} + + resp = Response(json.dumps(response)) + + elif action in API.callbacks['private']: + resp = Response(str(getCallback(action, scope = 'private')(request))) else: resp = Response('(O_o) Dude what? (invalid command)') endTime = math.floor(time.time()) @@ -175,6 +376,68 @@ class API: resp = Response("") return resp + @app.route('/public/upload/', methods=['POST']) + def blockUpload(): + self.validateHost('public') + resp = 'failure' + try: + data = request.form['block'] + except KeyError: + logger.warn('No block specified for upload') + pass + else: + if sys.getsizeof(data) < 100000000: + try: + if blockimporter.importBlockFromData(data, self._core): + resp = 'success' + else: + logger.warn('Error encountered importing uploaded block') + except onionrexceptions.BlacklistedBlock: + logger.debug('uploaded block is blacklisted') + pass + + resp = Response(resp) + return resp + + @app.route('/public/announce/', methods=['POST']) + def acceptAnnounce(): + self.validateHost('public') + resp = 'failure' + powHash = '' + randomData = '' + newNode = '' + ourAdder = self._core.hsAddress.encode() + try: + newNode = request.form['node'].encode() + except KeyError: + logger.warn('No block specified for upload') + pass + else: + try: + randomData = request.form['random'] + randomData = base64.b64decode(randomData) + except KeyError: + logger.warn('No random data specified for upload') + else: + nodes = newNode + self._core.hsAddress.encode() + nodes = self._core._crypto.blake2bHash(nodes) + powHash = self._core._crypto.blake2bHash(randomData + nodes) + try: + powHash = powHash.decode() + except AttributeError: + pass + if 
powHash.startswith('0000'): + try: + newNode = newNode.decode() + except AttributeError: + pass + if self._core.addAddress(newNode): + resp = 'Success' + else: + logger.warn(newNode.decode() + ' failed to meet POW: ' + powHash) + resp = Response(resp) + return resp + @app.route('/public/') def public_handler(): # Public means it is publicly network accessible @@ -186,6 +449,9 @@ class API: data = data except: data = '' + + events.event('webapi_public', onionr = None, data = {'action' : action, 'data' : data, 'requestingPeer' : requestingPeer, 'request' : request}) + if action == 'firstConnect': pass elif action == 'ping': @@ -196,22 +462,11 @@ class API: resp = Response(self._utils.getBlockDBHash()) elif action == 'getBlockHashes': resp = Response('\n'.join(self._core.getBlockList())) - elif action == 'directMessage': - resp = Response(self._core.handle_direct_connection(data)) - elif action == 'announce': - if data != '': - # TODO: require POW for this - if self._core.addAddress(data): - resp = Response('Success') - else: - resp = Response('') - else: - resp = Response('') # setData should be something the communicator initiates, not this api elif action == 'getData': resp = '' if self._utils.validateHash(data): - if not os.path.exists('data/blocks/' + data + '.db'): + if os.path.exists('data/blocks/' + data + '.dat'): block = Block(hash=data.encode(), core=self._core) resp = base64.b64encode(block.getRaw().encode()).decode() if len(resp) == 0: @@ -227,6 +482,8 @@ class API: peers = self._core.listPeers(getPow=True) response = ','.join(peers) resp = Response(response) + elif action in API.callbacks['public']: + resp = Response(str(getCallback(action, scope = 'public')(request))) else: resp = Response("") @@ -243,7 +500,6 @@ class API: def authFail(err): self.requestFailed = True resp = Response("403") - return resp @app.errorhandler(401) @@ -256,11 +512,13 @@ class API: logger.info('Starting client on ' + self.host + ':' + str(bindPort) + '...', timestamp=False) 
try: + while len(self._core.hsAddress) == 0: + self._core.refreshFirstStartVars() + time.sleep(0.5) self.http_server = WSGIServer((self.host, bindPort), app) self.http_server.serve_forever() except KeyboardInterrupt: pass - #app.run(host=self.host, port=bindPort, debug=False, threaded=True) except Exception as e: logger.error(str(e)) logger.fatal('Failed to start client on ' + self.host + ':' + str(bindPort) + ', exiting...') @@ -297,3 +555,31 @@ class API: # we exit rather than abort to avoid fingerprinting logger.debug('Avoiding fingerprinting, exiting...') sys.exit(1) + + def setCallback(action, callback, scope = 'public'): + if not scope in API.callbacks: + return False + + API.callbacks[scope][action] = callback + + return True + + def removeCallback(action, scope = 'public'): + if (not scope in API.callbacks) or (not action in API.callbacks[scope]): + return False + + del API.callbacks[scope][action] + + return True + + def getCallback(action, scope = 'public'): + if (not scope in API.callbacks) or (not action in API.callbacks[scope]): + return None + + return API.callbacks[scope][action] + + def getCallbacks(scope = None): + if (not scope is None) and (scope in API.callbacks): + return API.callbacks[scope] + + return API.callbacks diff --git a/onionr/blockimporter.py b/onionr/blockimporter.py new file mode 100644 index 00000000..2c29927f --- /dev/null +++ b/onionr/blockimporter.py @@ -0,0 +1,46 @@ +''' + Onionr - P2P Microblogging Platform & Social network + + Import block data and save it +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' +import core, onionrexceptions, logger +def importBlockFromData(content, coreInst): + retData = False + + dataHash = coreInst._crypto.sha3Hash(content) + + if coreInst._blacklist.inBlacklist(dataHash): + raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,)) + + if not isinstance(coreInst, core.Core): + raise Exception("coreInst must be an Onionr core instance") + + try: + content = content.encode() + except AttributeError: + pass + + metas = coreInst._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata + metadata = metas[0] + if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid + if coreInst._crypto.verifyPow(content): # check if POW is enough/correct + logger.info('Block passed proof, saving.') + blockHash = coreInst.setData(content) + coreInst.addToBlockDB(blockHash, dataSaved=True) + coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database + retData = True + return retData \ No newline at end of file diff --git a/onionr/communicator.py b/onionr/communicator.py deleted file mode 100755 index 82d5a3c2..00000000 --- a/onionr/communicator.py +++ /dev/null @@ -1,783 +0,0 @@ -#!/usr/bin/env python3 -''' - Onionr - P2P Microblogging Platform & Social network. - - This file contains both the OnionrCommunicate class for communcating with peers - and code to operate as a daemon, getting commands from the command queue database (see core.Core.daemonQueue) -''' -''' - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' -import sqlite3, requests, hmac, hashlib, time, sys, os, math, logger, urllib.parse, base64, binascii, random, json, threading -import core, onionrutils, onionrcrypto, netcontroller, onionrproofs, config, onionrplugins as plugins -from onionrblockapi import Block - -class OnionrCommunicate: - def __init__(self, debug, developmentMode): - ''' - OnionrCommunicate - - This class handles communication with nodes in the Onionr network. - ''' - - self._core = core.Core() - self._utils = onionrutils.OnionrUtils(self._core) - self._crypto = onionrcrypto.OnionrCrypto(self._core) - self._netController = netcontroller.NetController(0) # arg is the HS port but not needed rn in this file - - self.newHashes = {} # use this to not keep hashes around too long if we cant get their data - self.keepNewHash = 12 - self.ignoredHashes = [] - - self.highFailureAmount = 7 - - self.communicatorThreads = 0 - self.maxThreads = 75 - self.processBlocksThreads = 0 - self.lookupBlocksThreads = 0 - - self.blocksProcessing = [] # list of blocks currently processing, to avoid trying a block twice at once in 2 seperate threads - self.peerStatus = {} # network actions (active requests) for peers used mainly to prevent conflicting actions in threads - - self.communicatorTimers = {} # communicator timers, name: rate (in seconds) - self.communicatorTimerCounts = {} - self.communicatorTimerFuncs = {} - - self.registerTimer('blockProcess', 20) - self.registerTimer('highFailure', 10) - self.registerTimer('heartBeat', 10) - self.registerTimer('pex', 120) - logger.debug('Communicator debugging enabled.') - - with open('data/hs/hostname', 'r') as torID: - 
todID = torID.read() - - apiRunningCheckRate = 10 - apiRunningCheckCount = 0 - - self.peerData = {} # Session data for peers (recent reachability, speed, etc) - - if os.path.exists(self._core.queueDB): - self._core.clearDaemonQueue() - - # Loads in and starts the enabled plugins - plugins.reload() - - # Print nice header thing :) - if config.get('general.display_header', True): - self.header() - - while True: - command = self._core.daemonQueue() - # Process blocks based on a timer - self.timerTick() - # TODO: migrate below if statements to be own functions which are called in the above timerTick() function - if self.communicatorTimers['highFailure'] == self.communicatorTimerCounts['highFailure']: - self.communicatorTimerCounts['highFailure'] = 0 - for i in self.peerData: - if self.peerData[i]['failCount'] >= self.highFailureAmount: - self.peerData[i]['failCount'] -= 1 - if self.communicatorTimers['pex'] == self.communicatorTimerCounts['pex']: - pT1 = threading.Thread(target=self.getNewPeers, name="pT1") - pT1.start() - pT2 = threading.Thread(target=self.getNewPeers, name="pT2") - pT2.start() - self.communicatorTimerCounts['pex'] = 0# TODO: do not reset timer if low peer count - if self.communicatorTimers['heartBeat'] == self.communicatorTimerCounts['heartBeat']: - logger.debug('Communicator heartbeat') - self.communicatorTimerCounts['heartBeat'] = 0 - if self.communicatorTimers['blockProcess'] == self.communicatorTimerCounts['blockProcess']: - lT1 = threading.Thread(target=self.lookupBlocks, name="lt1", args=(True,)) - lT2 = threading.Thread(target=self.lookupBlocks, name="lt2", args=(True,)) - lT3 = threading.Thread(target=self.lookupBlocks, name="lt3", args=(True,)) - lT4 = threading.Thread(target=self.lookupBlocks, name="lt4", args=(True,)) - pbT1 = threading.Thread(target=self.processBlocks, name='pbT1', args=(True,)) - pbT2 = threading.Thread(target=self.processBlocks, name='pbT2', args=(True,)) - pbT3 = threading.Thread(target=self.processBlocks, name='pbT3', 
args=(True,)) - pbT4 = threading.Thread(target=self.processBlocks, name='pbT4', args=(True,)) - if (self.maxThreads - 8) >= threading.active_count(): - lT1.start() - lT2.start() - lT3.start() - lT4.start() - pbT1.start() - pbT2.start() - pbT3.start() - pbT4.start() - self.communicatorTimerCounts['blockProcess'] = 0 - else: - logger.debug(threading.active_count()) - logger.debug('Too many threads.') - if command != False: - if command[0] == 'shutdown': - logger.info('Daemon received exit command.', timestamp=True) - break - elif command[0] == 'announceNode': - announceAttempts = 3 - announceAttemptCount = 0 - announceVal = False - logger.info('Announcing node to %s...' % command[1], timestamp=True) - while not announceVal: - announceAttemptCount += 1 - announceVal = self.performGet('announce', command[1], data=self._core.hsAdder.replace('\n', ''), skipHighFailureAddress=True) - # logger.info(announceVal) - if announceAttemptCount >= announceAttempts: - logger.warn('Unable to announce to %s' % command[1]) - break - elif command[0] == 'runCheck': - logger.debug('Status check; looks good.') - open('data/.runcheck', 'w+').close() - elif command[0] == 'kex': - self.pexCount = pexTimer - 1 - elif command[0] == 'event': - # todo - pass - elif command[0] == 'checkCallbacks': - try: - data = json.loads(command[1]) - - logger.info('Checking for callbacks with connection %s...' % data['id']) - - self.check_callbacks(data, config.get('general.dc_execcallbacks', True)) - - events.event('incoming_direct_connection', data = {'callback' : True, 'communicator' : self, 'data' : data}) - except Exception as e: - logger.error('Failed to interpret callbacks for checking', e) - elif command[0] == 'incomingDirectConnection': - try: - data = json.loads(command[1]) - - logger.info('Handling incoming connection %s...' 
% data['id']) - - self.incoming_direct_connection(data) - - events.event('incoming_direct_connection', data = {'callback' : False, 'communicator' : self, 'data' : data}) - except Exception as e: - logger.error('Failed to handle callbacks for checking', e) - - apiRunningCheckCount += 1 - - # check if local API is up - if apiRunningCheckCount > apiRunningCheckRate: - if self._core._utils.localCommand('ping') != 'pong': - for i in range(4): - if self._utils.localCommand('ping') == 'pong': - apiRunningCheckCount = 0 - break # break for loop - time.sleep(1) - else: - # This executes if the api is NOT detected to be running - logger.error('Daemon detected API crash (or otherwise unable to reach API after long time), stopping...') - break # break main daemon loop - apiRunningCheckCount = 0 - - time.sleep(1) - - self._netController.killTor() - return - - future_callbacks = {} - connection_handlers = {} - id_peer_cache = {} - - def registerTimer(self, timerName, rate, timerFunc=None): - ''' - Register a communicator timer - ''' - self.communicatorTimers[timerName] = rate - self.communicatorTimerCounts[timerName] = 0 - self.communicatorTimerFuncs[timerName] = timerFunc - - def timerTick(self): - ''' - Increments timers "ticks" and calls funcs if applicable - ''' - tName = '' - for i in self.communicatorTimers.items(): - tName = i[0] - self.communicatorTimerCounts[tName] += 1 - - if self.communicatorTimerCounts[tName] == self.communicatorTimers[tName]: - try: - self.communicatorTimerFuncs[tName]() - except TypeError: - pass - else: - self.communicatorTimerCounts[tName] = 0 - - - def get_connection_handlers(self, name = None): - ''' - Returns a list of callback handlers by name, or, if name is None, it returns all handlers. 
- ''' - - if name is None: - return self.connection_handlers - elif name in self.connection_handlers: - return self.connection_handlers[name] - else: - return list() - - def add_connection_handler(self, name, handler): - ''' - Adds a function to be called when an connection that is NOT a callback is received. - Takes in the name of the communication type and the handler as input - ''' - - if not name in self.connection_handlers: - self.connection_handlers[name] = list() - - self.connection_handlers[name].append(handler) - - return - - def remove_connection_handler(self, name, handler = None): - ''' - Removes a connection handler if specified, or removes all by name - ''' - - if handler is None: - if name in self.connection_handlers: - self.connection_handlers[name].remove(handler) - elif name in self.connection_handlers: - del self.connection_handlers[name] - - return - - - def set_callback(self, identifier, callback): - ''' - (Over)writes a callback by communication identifier - ''' - - if not callback is None: - self.future_callbacks[identifier] = callback - return True - - return False - - def unset_callback(self, identifier): - ''' - Unsets a callback by communication identifier, if set - ''' - - if identifier in future_callbacks: - del self.future_callbacks[identifier] - return True - - return False - - def get_callback(self, identifier): - ''' - Returns a callback by communication identifier if set, or None - ''' - - if identifier in self.future_callbacks: - return self.future_callbacks[id] - - return None - - def direct_connect(self, peer, data = None, callback = None, log = True): - ''' - Communicates something directly with the client - - - `peer` should obviously be the peer id to request. - - `data` should be a dict (NOT str), with the parameter "type" - ex. {'type': 'sendMessage', 'content': 'hey, this is a dm'} - In that dict, the key 'token' must NEVER be set. If it is, it will - be overwritten. 
- - if `callback` is set to a function, it will call that function - back if/when the client the request is sent to decides to respond. - Do NOT depend on a response, because users can configure their - clients not to respond to this type of request. - - `log` is set to True by default-- what this does is log the - request for debug purposes. Should be False for sensitive actions. - ''' - - # TODO: Timing attack prevention - try: - # does not need to be secure random, only used for keeping track of async responses - # Actually, on second thought, it does need to be secure random. Otherwise, if it is predictable, someone could trigger arbitrary callbacks that have been saved on the local node, wrecking all kinds of havoc. Better just to keep it secure random. - identifier = self._utils.token(32) - if 'id' in data: - identifier = data['id'] - - if not identifier in id_peer_cache: - id_peer_cache[identifier] = peer - - if type(data) == str: - # if someone inputs a string instead of a dict, it will assume it's the type - data = {'type' : data} - - data['id'] = identifier - data['token'] = '' # later put PoW stuff here or whatever is needed - data_str = json.dumps(data) - - events.event('outgoing_direct_connection', data = {'callback' : True, 'communicator' : self, 'data' : data, 'id' : identifier, 'token' : token, 'peer' : peer, 'callback' : callback, 'log' : log}) - - logger.debug('Direct connection (identifier: "%s"): %s' % (identifier, data_str)) - try: - self.performGet('directMessage', peer, data_str) - except: - logger.warn('Failed to connect to peer: "%s".' % str(peer)) - return False - - if not callback is None: - self.set_callback(identifier, callback) - - return True - except Exception as e: - logger.warn('Unknown error, failed to execute direct connect (peer: "%s").' % str(peer), e) - - return False - - def direct_connect_response(self, identifier, data, peer = None, callback = None, log = True): - ''' - Responds to a previous connection. 
Hostname will be pulled from id_peer_cache if not specified in `peer` parameter. - - If yet another callback is requested, it can be put in the `callback` parameter. - ''' - - if config.get('general.dc_response', True): - data['id'] = identifier - data['sender'] = open('data/hs/hostname').read() - data['callback'] = True - - if (origin is None) and (identifier in id_peer_cache): - origin = id_peer_cache[identifier] - - if not identifier in id_peer_cache: - id_peer_cache[identifier] = peer - - if origin is None: - logger.warn('Failed to identify peer for connection %s' % str(identifier)) - return False - else: - return self.direct_connect(peer, data = data, callback = callback, log = log) - else: - logger.warn('Node tried to respond to direct connection id %s, but it was rejected due to `dc_response` restriction.' % str(identifier)) - return False - - - def check_callbacks(self, data, execute = True, remove = True): - ''' - Check if a callback is set, and if so, execute it - ''' - - try: - if type(data) is str: - data = json.loads(data) - - if 'id' in data: # TODO: prevent enumeration, require extra PoW - identifier = data['id'] - - if identifier in self.future_callbacks: - if execute: - self.get_callback(identifier)(data) - logger.debug('Request callback "%s" executed.' % str(identifier)) - if remove: - self.unset_callback(identifier) - - return True - - logger.warn('Unable to find request callback for ID "%s".' % str(identifier)) - else: - logger.warn('Unable to identify callback request, `id` parameter missing: %s' % json.dumps(data)) - except Exception as e: - logger.warn('Unknown error, failed to execute direct connection callback (peer: "%s").' % str(peer), e) - - return False - - def incoming_direct_connection(self, data): - ''' - This code is run whenever there is a new incoming connection. 
- ''' - - if 'type' in data and data['type'] in self.connection_handlers: - for handler in self.get_connection_handlers(name): - handler(data) - - return - - def getNewPeers(self): - ''' - Get new peers and ed25519 keys - ''' - - peersCheck = 1 # Amount of peers to ask for new peers + keys - peersChecked = 0 - peerList = list(self._core.listAdders()) # random ordered list of peers - newKeys = [] - newAdders = [] - if len(peerList) > 0: - maxN = len(peerList) - 1 - else: - peersCheck = 0 - maxN = 0 - - if len(peerList) > peersCheck: - peersCheck = len(peerList) - - while peersCheck > peersChecked: - #i = secrets.randbelow(maxN) # cant use prior to 3.6 - i = random.randint(0, maxN) - - try: - if self.peerStatusTaken(peerList[i], 'pex') or self.peerStatusTaken(peerList[i], 'kex'): - continue - except IndexError: - pass - - logger.info('Using %s to find new peers...' % peerList[i], timestamp=True) - - try: - newAdders = self.performGet('pex', peerList[i], skipHighFailureAddress=True) - if not newAdders is False: # keep the is False thing in there, it might not be bool - logger.debug('Attempting to merge address: %s' % str(newAdders)) - self._utils.mergeAdders(newAdders) - except requests.exceptions.ConnectionError: - logger.info('%s connection failed' % peerList[i], timestamp=True) - continue - else: - try: - logger.info('Using %s to find new keys...' 
% peerList[i]) - newKeys = self.performGet('kex', peerList[i], skipHighFailureAddress=True) - logger.debug('Attempting to merge pubkey: %s' % str(newKeys)) - # TODO: Require keys to come with POW token (very large amount of POW) - self._utils.mergeKeys(newKeys) - except requests.exceptions.ConnectionError: - logger.info('%s connection failed' % peerList[i], timestamp=True) - continue - else: - peersChecked += 1 - return - - def lookupBlocks(self, isThread=False): - ''' - Lookup blocks and merge new ones - ''' - if isThread: - self.lookupBlocksThreads += 1 - peerList = self._core.listAdders() - blockList = list() - - for i in peerList: - if self.peerStatusTaken(i, 'getBlockHashes') or self.peerStatusTaken(i, 'getDBHash'): - continue - try: - if self.peerData[i]['failCount'] >= self.highFailureAmount: - continue - except KeyError: - pass - - lastDB = self._core.getAddressInfo(i, 'DBHash') - - if lastDB == None: - logger.debug('Fetching db hash from %s, no previous known.' % str(i)) - else: - logger.debug('Fetching db hash from %s, %s last known' % (str(i), str(lastDB))) - - currentDB = self.performGet('getDBHash', i) - - if currentDB != False: - logger.debug('%s hash db (from request): %s' % (str(i), str(currentDB))) - else: - logger.warn('Failed to get hash db status for %s' % str(i)) - - if currentDB != False: - if lastDB != currentDB: - logger.debug('Fetching hash from %s - %s current hash.' 
% (str(i), currentDB)) - try: - blockList.extend(self.performGet('getBlockHashes', i).split('\n')) - except TypeError: - logger.warn('Failed to get data hash from %s' % str(i)) - self.peerData[i]['failCount'] -= 1 - if self._utils.validateHash(currentDB): - self._core.setAddressInfo(i, "DBHash", currentDB) - - if len(blockList) != 0: - pass - - for i in blockList: - if len(i.strip()) == 0: - continue - try: - if self._utils.hasBlock(i): - continue - except: - logger.warn('Invalid hash') # TODO: move below validate hash check below - pass - if i in self.ignoredHashes: - continue - - #logger.debug('Exchanged block (blockList): ' + i) - if not self._utils.validateHash(i): - # skip hash if it isn't valid - logger.warn('Hash %s is not valid' % str(i)) - continue - else: - self.newHashes[i] = 0 - logger.debug('Adding %s to hash database...' % str(i)) - self._core.addToBlockDB(i) - self.lookupBlocksThreads -= 1 - return - - def processBlocks(self, isThread=False): - ''' - Work with the block database and download any missing blocks - - This is meant to be called from the communicator daemon on its timer. - ''' - if isThread: - self.processBlocksThreads += 1 - for i in self._core.getBlockList(unsaved = True): - if i != "": - if i in self.blocksProcessing or i in self.ignoredHashes: - #logger.debug('already processing ' + i) - continue - else: - self.blocksProcessing.append(i) - try: - self.newHashes[i] - except KeyError: - self.newHashes[i] = 0 - - # check if a new hash has been around too long, delete it from database and add it to ignore list - if self.newHashes[i] >= self.keepNewHash: - logger.warn('Ignoring block %s because it took to long to get valid data.' 
% str(i)) - del self.newHashes[i] - self._core.removeBlock(i) - self.ignoredHashes.append(i) - continue - - self.newHashes[i] += 1 - logger.warn('Block is unsaved: %s' % str(i)) - data = self.downloadBlock(i) - - # if block was successfully gotten (hash already verified) - if data: - del self.newHashes[i] # remove from probation list - - # deal with block metadata - blockContent = self._core.getData(i) - try: - blockContent = blockContent.encode() - except AttributeError: - pass - try: - #blockMetadata = json.loads(self._core.getData(i)).split('}')[0] + '}' - blockMetadata = json.loads(blockContent[:blockContent.find(b'\n')].decode()) - try: - blockMeta2 = json.loads(blockMetadata['meta']) - except KeyError: - blockMeta2 = {'type': ''} - pass - blockContent = blockContent[blockContent.find(b'\n') + 1:] - try: - blockContent = blockContent.decode() - except AttributeError: - pass - - if not self._crypto.verifyPow(blockContent, blockMeta2): - logger.warn("%s has invalid or insufficient proof of work token, deleting..." 
% str(i)) - self._core.removeBlock(i) - continue - else: - if (('sig' in blockMetadata) and ('id' in blockMeta2)): # id doesn't exist in blockMeta2, so this won't workin the first place - - #blockData = json.dumps(blockMetadata['meta']) + blockMetadata[blockMetadata.rfind(b'}') + 1:] - - creator = self._utils.getPeerByHashId(blockMeta2['id']) - try: - creator = creator.decode() - except AttributeError: - pass - - if self._core._crypto.edVerify(blockMetadata['meta'] + blockContent, creator, blockMetadata['sig'], encodedData=True): - logger.info('%s was signed' % str(i)) - self._core.updateBlockInfo(i, 'sig', 'true') - else: - logger.warn('%s has an invalid signature' % str(i)) - self._core.updateBlockInfo(i, 'sig', 'false') - try: - logger.info('Block type is %s' % str(blockMeta2['type'])) - self._core.updateBlockInfo(i, 'dataType', blockMeta2['type']) - self.removeBlockFromProcessingList(i) - self.removeBlockFromProcessingList(i) - except KeyError: - logger.warn('Block has no type') - pass - except json.decoder.JSONDecodeError: - logger.warn('Could not decode block metadata') - self.removeBlockFromProcessingList(i) - self.processBlocksThreads -= 1 - return - - def removeBlockFromProcessingList(self, block): - ''' - Remove a block from the processing list - ''' - try: - self.blocksProcessing.remove(block) - except ValueError: - return False - else: - return True - - def downloadBlock(self, hash, peerTries=3): - ''' - Download a block from random order of peers - ''' - - retVal = False - peerList = self._core.listAdders() - blocks = '' - peerTryCount = 0 - - for i in peerList: - try: - if self.peerData[i]['failCount'] >= self.highFailureAmount: - continue - except KeyError: - pass - if peerTryCount >= peerTries: - break - - hasher = hashlib.sha3_256() - data = self.performGet('getData', i, hash, skipHighFailureAddress=True) - - if data == False or len(data) > 10000000 or data == '': - peerTryCount += 1 - continue - - try: - data = base64.b64decode(data) - except 
binascii.Error: - data = b'' - - hasher.update(data) - digest = hasher.hexdigest() - - if type(digest) is bytes: - digest = digest.decode() - - if digest == hash.strip(): - self._core.setData(data) - logger.info('Successfully obtained data for %s' % str(hash), timestamp=True) - retVal = True - break - else: - logger.warn("Failed to validate %s -- hash calculated was %s" % (hash, digest)) - peerTryCount += 1 - - return retVal - - def urlencode(self, data): - ''' - URL encodes the data - ''' - return urllib.parse.quote_plus(data) - - def performGet(self, action, peer, data=None, skipHighFailureAddress=False, selfCheck=True): - ''' - Performs a request to a peer through Tor or i2p (currently only Tor) - ''' - - if not peer.endswith('.onion') and not peer.endswith('.onion/') and not peer.endswith('.b32.i2p'): - raise PeerError('Currently only Tor/i2p .onion/.b32.i2p peers are supported. You must manually specify .onion/.b32.i2p') - - if len(self._core.hsAdder.strip()) == 0: - raise Exception("Could not perform self address check in performGet due to not knowing our address") - if selfCheck: - if peer.replace('/', '') == self._core.hsAdder: - logger.warn('Tried to performGet to own hidden service, but selfCheck was not set to false') - return - - # Store peer in peerData dictionary (non permanent) - if not peer in self.peerData: - self.peerData[peer] = {'connectCount': 0, 'failCount': 0, 'lastConnectTime': self._utils.getEpoch()} - socksPort = sys.argv[2] - '''We use socks5h to use tor as DNS''' - - if peer.endswith('onion'): - proxies = {'http': 'socks5h://127.0.0.1:' + str(socksPort), 'https': 'socks5h://127.0.0.1:' + str(socksPort)} - - elif peer.endswith('b32.i2p'): - proxies = {'http': 'http://127.0.0.1:4444'} - headers = {'user-agent': 'PyOnionr'} - url = 'http://' + peer + '/public/?action=' + self.urlencode(action) - - if data != None: - url = url + '&data=' + self.urlencode(data) - try: - if skipHighFailureAddress and self.peerData[peer]['failCount'] > 
self.highFailureAmount: - retData = False - logger.debug('Skipping %s because of high failure rate.' % peer) - else: - self.peerStatus[peer] = action - logger.debug('Contacting %s on port %s' % (peer, str(socksPort))) - try: - r = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=(15, 30)) - except ValueError: - proxies = {'http': 'socks5://127.0.0.1:' + str(socksPort), 'https': 'socks5://127.0.0.1:' + str(socksPort)} - r = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=(15, 30)) - retData = r.text - except requests.exceptions.RequestException as e: - logger.debug('%s failed with peer %s' % (action, peer)) - logger.debug('Error: %s' % str(e)) - retData = False - - if not retData: - self.peerData[peer]['failCount'] += 1 - else: - self.peerData[peer]['connectCount'] += 1 - self.peerData[peer]['failCount'] -= 1 - self.peerData[peer]['lastConnectTime'] = self._utils.getEpoch() - self._core.setAddressInfo(peer, 'lastConnect', self._utils.getEpoch()) - return retData - - def peerStatusTaken(self, peer, status): - ''' - Returns if we are currently performing a specific action with a peer. 
- ''' - try: - if self.peerStatus[peer] == status: - return True - except KeyError: - pass - return False - - def header(self, message = logger.colors.fg.pink + logger.colors.bold + 'Onionr' + logger.colors.reset + logger.colors.fg.pink + ' has started.'): - if os.path.exists('static-data/header.txt'): - with open('static-data/header.txt', 'rb') as file: - # only to stdout, not file or log or anything - print(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n')) - logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n') - -shouldRun = False -debug = True -developmentMode = False -if config.get('general.dev_mode', True): - developmentMode = True -try: - if sys.argv[1] == 'run': - shouldRun = True -except IndexError: - pass -if shouldRun: - try: - OnionrCommunicate(debug, developmentMode) - except KeyboardInterrupt: - sys.exit(1) - pass diff --git a/onionr/communicator2.py b/onionr/communicator2.py index 01103bb5..38ba2692 100755 --- a/onionr/communicator2.py +++ b/onionr/communicator2.py @@ -19,31 +19,50 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . 
''' -import sys, os, core, config, json, onionrblockapi as block, requests, time, logger, threading, onionrplugins as plugins, base64 -import onionrexceptions +import sys, os, core, config, json, requests, time, logger, threading, base64, onionr +import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block +import onionrdaemontools from defusedxml import minidom class OnionrCommunicatorDaemon: def __init__(self, debug, developmentMode): - logger.warn('New (unstable) communicator is being used.') + # list of timer instances self.timers = [] - self._core = core.Core(torPort=sys.argv[2]) + + # initalize core with Tor socks port being 3rd argument + self.proxyPort = sys.argv[2] + self._core = core.Core(torPort=self.proxyPort) + + # intalize NIST beacon salt and time self.nistSaltTimestamp = 0 self.powSalt = 0 + + self.blockToUpload = '' + + # loop time.sleep delay in seconds self.delay = 1 - self.proxyPort = sys.argv[2] + + # time app started running for info/statistics purposes self.startTime = self._core._utils.getEpoch() + # lists of connected peers and peers we know we can't reach currently self.onlinePeers = [] self.offlinePeers = [] + self.peerProfiles = [] # list of peer's profiles (onionrpeers.PeerProfile instances) + # amount of threads running by name, used to prevent too many self.threadCounts = {} + # set true when shutdown command recieved self.shutdown = False - self.blockQueue = [] # list of new blocks to download - + # list of new blocks to download, added to when new block lists are fetched from peers + self.blockQueue = [] + + # list of blocks currently downloading, avoid s + self.currentDownloading = [] + # Clear the daemon queue for any dead messages if os.path.exists(self._core.queueDB): self._core.clearDaemonQueue() @@ -51,105 +70,172 @@ class OnionrCommunicatorDaemon: # Loads in and starts the enabled plugins plugins.reload() - # Print nice header thing :) - if 
config.get('general.display_header', True): - self.header() + # daemon tools are misc daemon functions, e.g. announce to online peers + # intended only for use by OnionrCommunicatorDaemon + #self.daemonTools = onionrdaemontools.DaemonTools(self) + self.daemonTools = onionrdaemontools.DaemonTools(self) if debug or developmentMode: OnionrCommunicatorTimers(self, self.heartbeat, 10) - self.getOnlinePeers() + # Print nice header thing :) + if config.get('general.display_header', True) and not self.shutdown: + self.header() + + # Set timers, function reference, seconds + # requiresPeer True means the timer function won't fire if we have no connected peers + # TODO: make some of these timer counts configurable OnionrCommunicatorTimers(self, self.daemonCommands, 5) OnionrCommunicatorTimers(self, self.detectAPICrash, 5) - OnionrCommunicatorTimers(self, self.getOnlinePeers, 60) - OnionrCommunicatorTimers(self, self.lookupBlocks, 7) - OnionrCommunicatorTimers(self, self.getBlocks, 10) - OnionrCommunicatorTimers(self, self.clearOfflinePeer, 120) - OnionrCommunicatorTimers(self, self.lookupKeys, 125) - OnionrCommunicatorTimers(self, self.lookupAdders, 600) + peerPoolTimer = OnionrCommunicatorTimers(self, self.getOnlinePeers, 60) + OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1) + OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True) + OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58) + OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True) + OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True) + announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1) + cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True) + + # set loop to execute instantly to load up peer pool (replaced old pool init wait) + peerPoolTimer.count = (peerPoolTimer.frequency - 1) + cleanupTimer.count = 
(cleanupTimer.frequency - 60) + announceTimer.count = (cleanupTimer.frequency - 60) + + # Main daemon loop, mainly for calling timers, don't do any complex operations here to avoid locking + try: + while not self.shutdown: + for i in self.timers: + if self.shutdown: + break + i.processTimer() + time.sleep(self.delay) + except KeyboardInterrupt: + self.shutdown = True + pass - # Main daemon loop, mainly for calling timers, do not do any complex operations here - while not self.shutdown: - for i in self.timers: - i.processTimer() - time.sleep(self.delay) logger.info('Goodbye.') - + self._core._utils.localCommand('shutdown') + time.sleep(0.5) + def lookupKeys(self): '''Lookup new keys''' - logger.info('LOOKING UP NEW KEYS') + logger.debug('Looking up new keys...') tryAmount = 1 for i in range(tryAmount): + # Download new key list from random online peers peer = self.pickOnlinePeer() newKeys = self.peerAction(peer, action='kex') self._core._utils.mergeKeys(newKeys) - self.decrementThreadCount('lookupKeys') return - + def lookupAdders(self): '''Lookup new peer addresses''' logger.info('LOOKING UP NEW ADDRESSES') tryAmount = 1 for i in range(tryAmount): + # Download new peer address list from random online peers peer = self.pickOnlinePeer() newAdders = self.peerAction(peer, action='pex') self._core._utils.mergeAdders(newAdders) - - self.decrementThreadCount('lookupKeys') + self.decrementThreadCount('lookupAdders') + def lookupBlocks(self): - '''Lookup new blocks''' + '''Lookup new blocks & add them to download queue''' logger.info('LOOKING UP NEW BLOCKS') tryAmount = 2 newBlocks = '' + existingBlocks = self._core.getBlockList() + triedPeers = [] # list of peers we've tried this time around for i in range(tryAmount): - peer = self.pickOnlinePeer() - newDBHash = self.peerAction(peer, 'getDBHash') + peer = self.pickOnlinePeer() # select random online peer + # if we've already tried all the online peers this time around, stop + if peer in triedPeers: + if 
len(self.onlinePeers) == len(triedPeers): + break + else: + continue + newDBHash = self.peerAction(peer, 'getDBHash') # get their db hash if newDBHash == False: - continue + continue # if request failed, restart loop (peer is added to offline peers automatically) + triedPeers.append(peer) if newDBHash != self._core.getAddressInfo(peer, 'DBHash'): self._core.setAddressInfo(peer, 'DBHash', newDBHash) - newBlocks = self.peerAction(peer, 'getBlockHashes') + try: + newBlocks = self.peerAction(peer, 'getBlockHashes') + except Exception as error: + logger.warn("could not get new blocks with " + peer, error=error) + newBlocks = False if newBlocks != False: # if request was a success for i in newBlocks.split('\n'): if self._core._utils.validateHash(i): # if newline seperated string is valid hash - if not os.path.exists('data/blocks/' + i + '.db'): + if not i in existingBlocks: # if block does not exist on disk and is not already in block queue - if i not in self.blockQueue: + if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i): self.blockQueue.append(i) self.decrementThreadCount('lookupBlocks') return def getBlocks(self): - '''download new blocks''' + '''download new blocks in queue''' for blockHash in self.blockQueue: - logger.info("ATTEMPTING TO DOWNLOAD " + blockHash) - content = self.peerAction(self.pickOnlinePeer(), 'getData', data=blockHash) + if self.shutdown: + break + if blockHash in self.currentDownloading: + logger.debug('ALREADY DOWNLOADING ' + blockHash) + continue + if blockHash in self._core.getBlockList(): + logger.debug('%s is already saved' % (blockHash,)) + self.blockQueue.remove(blockHash) + continue + self.currentDownloading.append(blockHash) + logger.info("Attempting to download %s..." 
% blockHash) + peerUsed = self.pickOnlinePeer() + content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata) if content != False: try: content = content.encode() except AttributeError: pass - content = base64.b64decode(content) - if self._core._crypto.sha3Hash(content) == blockHash: + content = base64.b64decode(content) # content is base64 encoded in transport + realHash = self._core._crypto.sha3Hash(content) + try: + realHash = realHash.decode() # bytes on some versions for some reason + except AttributeError: + pass + if realHash == blockHash: content = content.decode() # decode here because sha3Hash needs bytes above metas = self._core._utils.getBlockMetadataFromData(content) # returns tuple(metadata, meta), meta is also in metadata metadata = metas[0] - meta = metas[1] - if self._core._utils.validateMetadata(metadata): - if self._core._crypto.verifyPow(metas[2], metadata): + #meta = metas[1] + if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce + if self._core._crypto.verifyPow(content): # check if POW is enough/correct logger.info('Block passed proof, saving.') self._core.setData(content) self._core.addToBlockDB(blockHash, dataSaved=True) + self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database else: logger.warn('POW failed for block ' + blockHash) else: - logger.warn('Metadata for ' + blockHash + ' is invalid.') - self.blockQueue.remove(blockHash) + if self._core._blacklist.inBlacklist(realHash): + logger.warn('%s is blacklisted' % (realHash,)) + else: + logger.warn('Metadata for ' + blockHash + ' is invalid.') + self._core._blacklist.addToDB(blockHash) else: - logger.warn('Block hash validation failed for ' + blockHash + ' got ' + self._core._crypto.sha3Hash(content)) + # if block didn't meet expected hash + tempHash = self._core._crypto.sha3Hash(content) # lazy hack, TODO use var + try: + tempHash = 
tempHash.decode() + except AttributeError: + pass + # Punish peer for sharing invalid block (not always malicious, but is bad regardless) + onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50) + logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash) + self.blockQueue.remove(blockHash) # remove from block queue both if success or false + self.currentDownloading.remove(blockHash) self.decrementThreadCount('getBlocks') return @@ -176,7 +262,7 @@ class OnionrCommunicatorDaemon: self.threadCounts[threadName] -= 1 except KeyError: pass - + def clearOfflinePeer(self): '''Removes the longest offline peer to retry later''' try: @@ -184,20 +270,36 @@ class OnionrCommunicatorDaemon: except IndexError: pass else: - logger.debug('removed ' + removed + ' from offline list to try them again.') + logger.debug('Removed ' + removed + ' from offline list, will try them again.') self.decrementThreadCount('clearOfflinePeer') def getOnlinePeers(self): - '''Manages the self.onlinePeers attribute list''' + '''Manages the self.onlinePeers attribute list, connects to more peers if we have none connected''' + logger.info('Refreshing peer pool.') - maxPeers = 4 + maxPeers = 6 needed = maxPeers - len(self.onlinePeers) for i in range(needed): - self.connectNewPeer() + if len(self.onlinePeers) == 0: + self.connectNewPeer(useBootstrap=True) + else: + self.connectNewPeer() + if self.shutdown: + break + else: + if len(self.onlinePeers) == 0: + logger.warn('Could not connect to any peer.') self.decrementThreadCount('getOnlinePeers') - def connectNewPeer(self, peer=''): + def addBootstrapListToPeerList(self, peerList): + '''Add the bootstrap list to the peer list (no duplicates)''' + for i in self._core.bootstrapList: + if i not in peerList and i not in self.offlinePeers and i != self._core.hsAddress: + peerList.append(i) + self._core.addAddress(i) + + def connectNewPeer(self, peer='', useBootstrap=False): '''Adds a new random online peer to self.onlinePeers''' 
retData = False tried = self.offlinePeers @@ -208,33 +310,53 @@ class OnionrCommunicatorDaemon: raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address') else: peerList = self._core.listAdders() + + peerList = onionrpeers.getScoreSortedPeerList(self._core) - if len(peerList) == 0: - peerList.extend(self._core.bootstrapList) + if len(peerList) == 0 or useBootstrap: + # Avoid duplicating bootstrap addresses in peerList + self.addBootstrapListToPeerList(peerList) for address in peerList: + if not config.get('tor.v3onions') and len(address) == 62: + continue if len(address) == 0 or address in tried or address in self.onlinePeers: continue + if self.shutdown: + return if self.peerAction(address, 'ping') == 'pong!': - logger.info('connected to ' + address) - self.onlinePeers.append(address) + logger.info('Connected to ' + address) + time.sleep(0.1) + if address not in self.onlinePeers: + self.onlinePeers.append(address) retData = address + + # add peer to profile list if they're not in it + for profile in self.peerProfiles: + if profile.address == address: + break + else: + self.peerProfiles.append(onionrpeers.PeerProfiles(address, self._core)) break else: tried.append(address) - logger.debug('failed to connect to ' + address) - else: - if len(self.onlinePeers) == 0: - logger.warn('Could not connect to any peer') + logger.debug('Failed to connect to ' + address) return retData - + + def peerCleanup(self): + '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)''' + onionrpeers.peerCleanup(self._core) + self.decrementThreadCount('peerCleanup') + def printOnlinePeers(self): '''logs online peer list''' if len(self.onlinePeers) == 0: logger.warn('No online peers') - return - for i in self.onlinePeers: - logger.info(self.onlinePeers[i]) + else: + logger.info('Online peers:') + for i in self.onlinePeers: + score = str(self.getPeerProfileInstance(i).score) + logger.info(i + ', score: ' + 
score) def peerAction(self, peer, action, data=''): '''Perform a get request to a peer''' @@ -244,13 +366,33 @@ class OnionrCommunicatorDaemon: url = 'http://' + peer + '/public/?action=' + action if len(data) > 0: url += '&data=' + data + + self._core.setAddressInfo(peer, 'lastConnectAttempt', self._core._utils.getEpoch()) # mark the time we're trying to request this peer + retData = self._core._utils.doGetRequest(url, port=self.proxyPort) + # if request failed, (error), mark peer offline if retData == False: try: + self.getPeerProfileInstance(peer).addScore(-10) self.onlinePeers.remove(peer) self.getOnlinePeers() # Will only add a new peer to pool if needed except ValueError: pass + else: + self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch()) + self.getPeerProfileInstance(peer).addScore(1) + return retData + + def getPeerProfileInstance(self, peer): + '''Gets a peer profile instance from the list of profiles, by address name''' + for i in self.peerProfiles: + # if the peer's profile is already loaded, return that + if i.address == peer: + retData = i + break + else: + # if the peer's profile is not loaded, return a new one. 
connectNewPeer adds it the list on connect + retData = onionrpeers.PeerProfiles(peer, self._core) return retData def heartbeat(self): @@ -264,6 +406,8 @@ class OnionrCommunicatorDaemon: cmd = self._core.daemonQueue() if cmd is not False: + events.event('daemon_command', onionr = None, data = {'cmd' : cmd}) + if cmd[0] == 'shutdown': self.shutdown = True elif cmd[0] == 'announceNode': @@ -273,23 +417,50 @@ class OnionrCommunicatorDaemon: open('data/.runcheck', 'w+').close() elif cmd[0] == 'connectedPeers': self.printOnlinePeers() + elif cmd[0] == 'kex': + for i in self.timers: + if i.timerFunction.__name__ == 'lookupKeys': + i.count = (i.frequency - 1) + elif cmd[0] == 'pex': + for i in self.timers: + if i.timerFunction.__name__ == 'lookupAdders': + i.count = (i.frequency - 1) + elif cmd[0] == 'uploadBlock': + self.blockToUpload = cmd[1] + threading.Thread(target=self.uploadBlock).start() else: logger.info('Recieved daemonQueue command:' + cmd[0]) + self.decrementThreadCount('daemonCommands') + def uploadBlock(self): + '''Upload our block to a few peers''' + # when inserting a block, we try to upload it to a few peers to add some deniability + triedPeers = [] + if not self._core._utils.validateHash(self.blockToUpload): + logger.warn('Requested to upload invalid block') + return + for i in range(max(len(self.onlinePeers), 2)): + peer = self.pickOnlinePeer() + if peer in triedPeers: + continue + triedPeers.append(peer) + url = 'http://' + peer + '/public/upload/' + data = {'block': block.Block(self.blockToUpload).getRaw()} + proxyType = '' + if peer.endswith('.onion'): + proxyType = 'tor' + elif peer.endswith('.i2p'): + proxyType = 'i2p' + logger.info("Uploading block") + self._core._utils.doPostRequest(url, data=data, proxyType=proxyType) + def announce(self, peer): - '''Announce to peers''' - announceCount = 0 - announceAmount = 2 - for peer in self._core.listAdders(): - announceCount += 1 - if self.peerAction(peer, 'announce', self._core.hsAdder) == 'Success': - 
logger.info('Successfully introduced node to ' + peer) - break - else: - if announceCount == announceAmount: - logger.warn('Could not introduce node. Try again soon') - break + '''Announce to peers our address''' + if self.daemonTools.announceNode(): + logger.info('Successfully introduced node to ' + peer) + else: + logger.warn('Could not introduce node.') def detectAPICrash(self): '''exit if the api server crashes/stops''' @@ -300,6 +471,7 @@ class OnionrCommunicatorDaemon: time.sleep(1) else: # This executes if the api is NOT detected to be running + events.event('daemon_crash', onionr = None, data = {}) logger.error('Daemon detected API crash (or otherwise unable to reach API after long time), stopping...') self.shutdown = True self.decrementThreadCount('detectAPICrash') @@ -308,15 +480,16 @@ class OnionrCommunicatorDaemon: if os.path.exists('static-data/header.txt'): with open('static-data/header.txt', 'rb') as file: # only to stdout, not file or log or anything - print(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n')) + sys.stderr.write(file.read().decode().replace('P', logger.colors.fg.pink).replace('W', logger.colors.reset + logger.colors.bold).replace('G', logger.colors.fg.green).replace('\n', logger.colors.reset + '\n').replace('B', logger.colors.bold).replace('V', onionr.ONIONR_VERSION)) logger.info(logger.colors.fg.lightgreen + '-> ' + str(message) + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n') class OnionrCommunicatorTimers: - def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5): + def __init__(self, daemonInstance, timerFunction, frequency, makeThread=True, threadAmount=1, maxThreads=5, requiresPeer=False): self.timerFunction = timerFunction self.frequency = frequency self.threadAmount = threadAmount self.makeThread = makeThread + 
self.requiresPeer = requiresPeer self.daemonInstance = daemonInstance self.maxThreads = maxThreads self._core = self.daemonInstance._core @@ -325,25 +498,33 @@ class OnionrCommunicatorTimers: self.count = 0 def processTimer(self): - self.count += 1 + + # mark how many instances of a thread we have (decremented at thread end) try: self.daemonInstance.threadCounts[self.timerFunction.__name__] except KeyError: self.daemonInstance.threadCounts[self.timerFunction.__name__] = 0 + # execute thread if it is time, and we are not missing *required* online peer if self.count == self.frequency: - if self.makeThread: - for i in range(self.threadAmount): - if self.daemonInstance.threadCounts[self.timerFunction.__name__] >= self.maxThreads: - logger.warn(self.timerFunction.__name__ + ' has too many current threads to start anymore.') - else: - self.daemonInstance.threadCounts[self.timerFunction.__name__] += 1 - newThread = threading.Thread(target=self.timerFunction) - newThread.start() + try: + if self.requiresPeer and len(self.daemonInstance.onlinePeers) == 0: + raise onionrexceptions.OnlinePeerNeeded + except onionrexceptions.OnlinePeerNeeded: + pass else: - self.timerFunction() - self.count = 0 - + if self.makeThread: + for i in range(self.threadAmount): + if self.daemonInstance.threadCounts[self.timerFunction.__name__] >= self.maxThreads: + logger.warn(self.timerFunction.__name__ + ' has too many current threads to start anymore.') + else: + self.daemonInstance.threadCounts[self.timerFunction.__name__] += 1 + newThread = threading.Thread(target=self.timerFunction) + newThread.start() + else: + self.timerFunction() + self.count = -1 # negative 1 because its incremented at bottom + self.count += 1 shouldRun = False debug = True @@ -358,8 +539,5 @@ except IndexError: if shouldRun: try: OnionrCommunicatorDaemon(debug, developmentMode) - except KeyboardInterrupt: - sys.exit(1) - pass except Exception as e: logger.error('Error occured in Communicator', error = e, timestamp = False) 
diff --git a/onionr/core.py b/onionr/core.py index fe1147b7..9d97e3f1 100644 --- a/onionr/core.py +++ b/onionr/core.py @@ -21,7 +21,8 @@ import sqlite3, os, sys, time, math, base64, tarfile, getpass, simplecrypt, hash from onionrblockapi import Block import onionrutils, onionrcrypto, onionrproofs, onionrevents as events, onionrexceptions, onionrvalues - +import onionrblacklist +import dbcreator if sys.version_info < (3, 6): try: import sha3 @@ -40,11 +41,15 @@ class Core: self.blockDB = 'data/blocks.db' self.blockDataLocation = 'data/blocks/' self.addressDB = 'data/address.db' - self.hsAdder = '' - + self.hsAddress = '' self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt' self.bootstrapList = [] self.requirements = onionrvalues.OnionrValues() + self.torPort = torPort + self.dataNonceFile = 'data/block-nonces.dat' + self.dbCreate = dbcreator.DBCreator(self) + + self.usageFile = 'data/disk-usage.txt' if not os.path.exists('data/'): os.mkdir('data/') @@ -55,7 +60,7 @@ class Core: if os.path.exists('data/hs/hostname'): with open('data/hs/hostname', 'r') as hs: - self.hsAdder = hs.read() + self.hsAddress = hs.read().strip() # Load bootstrap address list if os.path.exists(self.bootstrapFileLocation): @@ -69,6 +74,7 @@ class Core: self._utils = onionrutils.OnionrUtils(self) # Initialize the crypto object self._crypto = onionrcrypto.OnionrCrypto(self) + self._blacklist = onionrblacklist.OnionrBlackList(self) except Exception as error: logger.error('Failed to initialize core Onionr library.', error=error) @@ -76,6 +82,12 @@ class Core: sys.exit(1) return + def refreshFirstStartVars(self): + '''Hack to refresh some vars which may not be set on first start''' + if os.path.exists('data/hs/hostname'): + with open('data/hs/hostname', 'r') as hs: + self.hsAddress = hs.read().strip() + def addPeer(self, peerID, powID, name=''): ''' Adds a public key to the key database (misleading function name) @@ -123,7 +135,6 @@ class Core: for i in c.execute("SELECT * FROM adders 
where address = '" + address + "';"): try: if i[0] == address: - logger.warn('Not adding existing address') conn.close() return False except ValueError: @@ -156,14 +167,13 @@ class Core: conn.close() events.event('address_remove', data = {'address': address}, onionr = None) - return True else: return False def removeBlock(self, block): ''' - remove a block from this node + remove a block from this node (does not automatically blacklist) ''' if self._utils.validateHash(block): conn = sqlite3.connect(self.blockDB) @@ -180,85 +190,20 @@ class Core: def createAddressDB(self): ''' Generate the address database - - types: - 1: I2P b32 address - 2: Tor v2 (like facebookcorewwwi.onion) - 3: Tor v3 ''' - conn = sqlite3.connect(self.addressDB) - c = conn.cursor() - c.execute('''CREATE TABLE adders( - address text, - type int, - knownPeer text, - speed int, - success int, - DBHash text, - powValue text, - failure int, - lastConnect int - ); - ''') - conn.commit() - conn.close() + self.dbCreate.createAddressDB() def createPeerDB(self): ''' Generate the peer sqlite3 database and populate it with the peers table. 
''' - # generate the peer database - conn = sqlite3.connect(self.peerDB) - c = conn.cursor() - c.execute('''CREATE TABLE peers( - ID text not null, - name text, - adders text, - blockDBHash text, - forwardKey text, - dateSeen not null, - bytesStored int, - trust int, - pubkeyExchanged int, - hashID text, - pow text not null); - ''') - conn.commit() - conn.close() - return + self.dbCreate.createPeerDB() def createBlockDB(self): ''' Create a database for blocks - - hash - the hash of a block - dateReceived - the date the block was recieved, not necessarily when it was created - decrypted - if we can successfully decrypt the block (does not describe its current state) - dataType - data type of the block - dataFound - if the data has been found for the block - dataSaved - if the data has been saved for the block - sig - optional signature by the author (not optional if author is specified) - author - multi-round partial sha3-256 hash of authors public key ''' - if os.path.exists(self.blockDB): - raise Exception("Block database already exists") - conn = sqlite3.connect(self.blockDB) - c = conn.cursor() - c.execute('''CREATE TABLE hashes( - hash text not null, - dateReceived int, - decrypted int, - dataType text, - dataFound int, - dataSaved int, - sig text, - author text - ); - ''') - conn.commit() - conn.close() - - return + self.dbCreate.createBlockDB() def addToBlockDB(self, newHash, selfInsert=False, dataSaved=False): ''' @@ -298,16 +243,24 @@ class Core: return data - def setData(self, data): - ''' - Set the data assciated with a hash - ''' - data = data + def _getSha3Hash(self, data): hasher = hashlib.sha3_256() if not type(data) is bytes: data = data.encode() hasher.update(data) dataHash = hasher.hexdigest() + return dataHash + + def setData(self, data): + ''' + Set the data assciated with a hash + ''' + data = data + if not type(data) is bytes: + data = data.encode() + + dataHash = self._getSha3Hash(data) + if type(dataHash) is bytes: dataHash = 
dataHash.decode() blockFileName = self.blockDataLocation + dataHash + '.dat' @@ -384,13 +337,13 @@ class Core: else: if retData != False: c.execute('DELETE FROM commands WHERE id=?;', (retData[3],)) - conn.commit() - conn.close() + conn.commit() + conn.close() events.event('queue_pop', data = {'data': retData}, onionr = None) return retData - + def makeDaemonDB(self): '''generate the daemon queue db''' conn = sqlite3.connect(self.queueDB) @@ -569,33 +522,15 @@ class Core: c = conn.cursor() command = (data, address) # TODO: validate key on whitelist - if key not in ('address', 'type', 'knownPeer', 'speed', 'success', 'DBHash', 'failure', 'lastConnect'): + if key not in ('address', 'type', 'knownPeer', 'speed', 'success', 'DBHash', 'failure', 'lastConnect', 'lastConnectAttempt'): raise Exception("Got invalid database key when setting address info") - c.execute('UPDATE adders SET ' + key + ' = ? WHERE address=?', command) - conn.commit() - conn.close() + else: + c.execute('UPDATE adders SET ' + key + ' = ? WHERE address=?', command) + conn.commit() + conn.close() return - def handle_direct_connection(self, data): - ''' - Handles direct messages - ''' - try: - data = json.loads(data) - - # TODO: Determine the sender, verify, etc - if ('callback' in data) and (data['callback'] is True): - # then this is a response to the message we sent earlier - self.daemonQueueAdd('checkCallbacks', json.dumps(data)) - else: - # then we should handle it and respond accordingly - self.daemonQueueAdd('incomingDirectConnection', json.dumps(data)) - except Exception as e: - logger.warn('Failed to handle incoming direct message: %s' % str(e)) - - return - - def getBlockList(self, unsaved = False): # TODO: Use unsaved + def getBlockList(self, unsaved = False): # TODO: Use unsaved?? 
''' Get list of our blocks ''' @@ -604,7 +539,7 @@ class Core: if unsaved: execute = 'SELECT hash FROM hashes WHERE dataSaved != 1 ORDER BY RANDOM();' else: - execute = 'SELECT hash FROM hashes ORDER BY RANDOM();' + execute = 'SELECT hash FROM hashes ORDER BY dateReceived DESC;' rows = list() for row in c.execute(execute): for i in row: @@ -626,13 +561,16 @@ class Core: return None - def getBlocksByType(self, blockType): + def getBlocksByType(self, blockType, orderDate=True): ''' Returns a list of blocks by the type ''' conn = sqlite3.connect(self.blockDB) c = conn.cursor() - execute = 'SELECT hash FROM hashes WHERE dataType=?;' + if orderDate: + execute = 'SELECT hash FROM hashes WHERE dataType=? ORDER BY dateReceived;' + else: + execute = 'SELECT hash FROM hashes WHERE dataType=?;' args = (blockType,) rows = list() for row in c.execute(execute, args): @@ -656,9 +594,19 @@ class Core: def updateBlockInfo(self, hash, key, data): ''' sets info associated with a block + + hash - the hash of a block + dateReceived - the date the block was recieved, not necessarily when it was created + decrypted - if we can successfully decrypt the block (does not describe its current state) + dataType - data type of the block + dataFound - if the data has been found for the block + dataSaved - if the data has been saved for the block + sig - optional signature by the author (not optional if author is specified) + author - multi-round partial sha3-256 hash of authors public key + dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is ''' - if key not in ('dateReceived', 'decrypted', 'dataType', 'dataFound', 'dataSaved', 'sig', 'author'): + if key not in ('dateReceived', 'decrypted', 'dataType', 'dataFound', 'dataSaved', 'sig', 'author', 'dateClaimed'): return False conn = sqlite3.connect(self.blockDB) @@ -669,27 +617,42 @@ class Core: conn.close() return True - def insertBlock(self, data, header='txt', sign=False, encryptType='', symKey='', 
asymPeer='', meta = {}): + def insertBlock(self, data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = None): ''' Inserts a block into the network encryptType must be specified to encrypt a block ''' + retData = False + # check nonce + dataNonce = self._utils.bytesToStr(self._crypto.sha3Hash(data)) try: - data.decode() - except AttributeError: - data = data.encode() + with open(self.dataNonceFile, 'r') as nonces: + if dataNonce in nonces: + return retData + except FileNotFoundError: + pass + # record nonce + with open(self.dataNonceFile, 'a') as nonceFile: + nonceFile.write(dataNonce + '\n') + + if meta is None: + meta = dict() + + if type(data) is bytes: + data = data.decode() + data = str(data) retData = '' signature = '' signer = '' metadata = {} + # metadata is full block metadata, meta is internal, user specified metadata # only use header if not set in provided meta - try: - meta['type'] - except KeyError: - meta['type'] = header # block type + if not header is None: + meta['type'] = header + meta['type'] = str(meta['type']) jsonMeta = json.dumps(meta) @@ -698,52 +661,52 @@ class Core: else: raise onionrexceptions.InvalidMetadata('encryptType must be asym or sym, or blank') + try: + data = data.encode() + except AttributeError: + pass # sign before encrypt, as unauthenticated crypto should not be a problem here if sign: - signature = self._crypto.edSign(jsonMeta + data, key=self._crypto.privKey, encodeResult=True) - signer = self._crypto.pubKeyHashID() + signature = self._crypto.edSign(jsonMeta.encode() + data, key=self._crypto.privKey, encodeResult=True) + signer = self._crypto.pubKey if len(jsonMeta) > 1000: raise onionrexceptions.InvalidMetadata('meta in json encoded form must not exceed 1000 bytes') - + # encrypt block metadata/sig/content if encryptType == 'sym': if len(symKey) < self.requirements.passwordLength: raise onionrexceptions.SecurityError('Weak encryption key') - jsonMeta = self._crypto.symmetricEncrypt(jsonMeta, 
key=symKey, returnEncoded=True) - data = self._crypto.symmetricEncrypt(data, key=symKey, returnEncoded=True) - signature = self._crypto.symmetricEncrypt(signature, key=symKey, returnEncoded=True) - signer = self._crypto.symmetricEncrypt(signer, key=symKey, returnEncoded=True) + jsonMeta = self._crypto.symmetricEncrypt(jsonMeta, key=symKey, returnEncoded=True).decode() + data = self._crypto.symmetricEncrypt(data, key=symKey, returnEncoded=True).decode() + signature = self._crypto.symmetricEncrypt(signature, key=symKey, returnEncoded=True).decode() + signer = self._crypto.symmetricEncrypt(signer, key=symKey, returnEncoded=True).decode() elif encryptType == 'asym': if self._utils.validatePubKey(asymPeer): - jsonMeta = self._crypto.pubKeyEncrypt(jsonMeta, asymPeer, encodedData=True) - data = self._crypto.pubKeyEncrypt(data, asymPeer, encodedData=True) - signature = self._crypto.pubKeyEncrypt(signature, asymPeer, encodedData=True) + jsonMeta = self._crypto.pubKeyEncrypt(jsonMeta, asymPeer, encodedData=True, anonymous=True).decode() + data = self._crypto.pubKeyEncrypt(data, asymPeer, encodedData=True, anonymous=True).decode() + signature = self._crypto.pubKeyEncrypt(signature, asymPeer, encodedData=True, anonymous=True).decode() + signer = self._crypto.pubKeyEncrypt(signer, asymPeer, encodedData=True, anonymous=True).decode() else: raise onionrexceptions.InvalidPubkey(asymPeer + ' is not a valid base32 encoded ed25519 key') - powProof = onionrproofs.POW(data) - - # wait for proof to complete - powToken = powProof.waitForResult() - - powToken = base64.b64encode(powToken[1]) - try: - powToken = powToken.decode() - except AttributeError: - pass - # compile metadata metadata['meta'] = jsonMeta metadata['sig'] = signature metadata['signer'] = signer - metadata['powRandomToken'] = powToken metadata['time'] = str(self._utils.getEpoch()) + + # send block data (and metadata) to POW module to get tokenized block data + proof = onionrproofs.POW(metadata, data) + payload = 
proof.waitForResult() + if payload != False: + retData = self.setData(payload) + self.addToBlockDB(retData, selfInsert=True, dataSaved=True) + self.setBlockType(retData, meta['type']) + self.daemonQueueAdd('uploadBlock', retData) - payload = json.dumps(metadata).encode() + b'\n' + data - retData = self.setData(payload) - self.addToBlockDB(retData, selfInsert=True, dataSaved=True) - + if retData != False: + events.event('insertBlock', onionr = None, threaded = False) return retData def introduceNode(self): diff --git a/onionr/dbcreator.py b/onionr/dbcreator.py new file mode 100644 index 00000000..19a9a7bd --- /dev/null +++ b/onionr/dbcreator.py @@ -0,0 +1,109 @@ +''' + Onionr - P2P Anonymous Data Storage & Sharing + + DBCreator, creates sqlite3 databases used by Onionr +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
+''' +import sqlite3, os +class DBCreator: + def __init__(self, coreInst): + self.core = coreInst + + def createAddressDB(self): + ''' + Generate the address database + + types: + 1: I2P b32 address + 2: Tor v2 (like facebookcorewwwi.onion) + 3: Tor v3 + ''' + conn = sqlite3.connect(self.core.addressDB) + c = conn.cursor() + c.execute('''CREATE TABLE adders( + address text, + type int, + knownPeer text, + speed int, + success int, + DBHash text, + powValue text, + failure int, + lastConnect int, + lastConnectAttempt int, + trust int + ); + ''') + conn.commit() + conn.close() + + def createPeerDB(self): + ''' + Generate the peer sqlite3 database and populate it with the peers table. + ''' + # generate the peer database + conn = sqlite3.connect(self.core.peerDB) + c = conn.cursor() + c.execute('''CREATE TABLE peers( + ID text not null, + name text, + adders text, + blockDBHash text, + forwardKey text, + dateSeen not null, + bytesStored int, + trust int, + pubkeyExchanged int, + hashID text, + pow text not null); + ''') + conn.commit() + conn.close() + return + + def createBlockDB(self): + ''' + Create a database for blocks + + hash - the hash of a block + dateReceived - the date the block was recieved, not necessarily when it was created + decrypted - if we can successfully decrypt the block (does not describe its current state) + dataType - data type of the block + dataFound - if the data has been found for the block + dataSaved - if the data has been saved for the block + sig - optional signature by the author (not optional if author is specified) + author - multi-round partial sha3-256 hash of authors public key + dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is + ''' + if os.path.exists(self.core.blockDB): + raise Exception("Block database already exists") + conn = sqlite3.connect(self.core.blockDB) + c = conn.cursor() + c.execute('''CREATE TABLE hashes( + hash text not null, + dateReceived int, + decrypted int, + 
dataType text, + dataFound int, + dataSaved int, + sig text, + author text, + dateClaimed int + ); + ''') + conn.commit() + conn.close() + return \ No newline at end of file diff --git a/onionr/logger.py b/onionr/logger.py index c915f2f9..1c299054 100644 --- a/onionr/logger.py +++ b/onionr/logger.py @@ -123,18 +123,18 @@ def get_file(): return _outputfile -def raw(data): +def raw(data, fd = sys.stdout): ''' Outputs raw data to console without formatting ''' if get_settings() & OUTPUT_TO_CONSOLE: - print(data) + ts = fd.write('%s\n' % data) if get_settings() & OUTPUT_TO_FILE: with open(_outputfile, "a+") as f: f.write(colors.filter(data) + '\n') -def log(prefix, data, color = '', timestamp=True): +def log(prefix, data, color = '', timestamp=True, fd = sys.stdout, prompt = True): ''' Logs the data prefix : The prefix to the output @@ -145,11 +145,11 @@ def log(prefix, data, color = '', timestamp=True): if timestamp: curTime = time.strftime("%m-%d %H:%M:%S") + ' ' - output = colors.reset + str(color) + '[' + colors.bold + str(prefix) + colors.reset + str(color) + '] ' + curTime + str(data) + colors.reset + output = colors.reset + str(color) + ('[' + colors.bold + str(prefix) + colors.reset + str(color) + '] ' if prompt is True else '') + curTime + str(data) + colors.reset if not get_settings() & USE_ANSI: output = colors.filter(output) - raw(output) + raw(output, fd = fd) def readline(message = ''): ''' @@ -201,31 +201,37 @@ def confirm(default = 'y', message = 'Are you sure %s? 
'): return default == 'y' # debug: when there is info that could be useful for debugging purposes only -def debug(data, timestamp=True): +def debug(data, error = None, timestamp = True, prompt = True): if get_level() <= LEVEL_DEBUG: - log('/', data, timestamp=timestamp) + log('/', data, timestamp=timestamp, prompt = prompt) + if not error is None: + debug('Error: ' + str(error) + parse_error()) # info: when there is something to notify the user of, such as the success of a process -def info(data, timestamp=False): +def info(data, timestamp = False, prompt = True): if get_level() <= LEVEL_INFO: - log('+', data, colors.fg.green, timestamp=timestamp) + log('+', data, colors.fg.green, timestamp = timestamp, prompt = prompt) # warn: when there is a potential for something bad to happen -def warn(data, timestamp=True): +def warn(data, error = None, timestamp = True, prompt = True): + if not error is None: + debug('Error: ' + str(error) + parse_error()) if get_level() <= LEVEL_WARN: - log('!', data, colors.fg.orange, timestamp=timestamp) + log('!', data, colors.fg.orange, timestamp = timestamp, prompt = prompt) # error: when only one function, module, or process of the program encountered a problem and must stop -def error(data, error=None, timestamp=True): +def error(data, error = None, timestamp = True, prompt = True): if get_level() <= LEVEL_ERROR: - log('-', data, colors.fg.red, timestamp=timestamp) + log('-', data, colors.fg.red, timestamp = timestamp, fd = sys.stderr, prompt = prompt) if not error is None: debug('Error: ' + str(error) + parse_error()) # fatal: when the something so bad has happened that the program must stop -def fatal(data, timestamp=True): +def fatal(data, error = None, timestamp=True, prompt = True): + if not error is None: + debug('Error: ' + str(error) + parse_error()) if get_level() <= LEVEL_FATAL: - log('#', data, colors.bg.red + colors.fg.green + colors.bold, timestamp=timestamp) + log('#', data, colors.bg.red + colors.fg.green + 
colors.bold, timestamp=timestamp, fd = sys.stderr, prompt = prompt) # returns a formatted error message def parse_error(): diff --git a/onionr/netcontroller.py b/onionr/netcontroller.py index 7ad74cc0..3749ce7a 100644 --- a/onionr/netcontroller.py +++ b/onionr/netcontroller.py @@ -18,7 +18,7 @@ along with this program. If not, see . ''' -import subprocess, os, random, sys, logger, time, signal +import subprocess, os, random, sys, logger, time, signal, config from onionrblockapi import Block class NetController: @@ -33,6 +33,7 @@ class NetController: self.hsPort = hsPort self._torInstnace = '' self.myID = '' + config.reload() ''' if os.path.exists(self.torConfigLocation): torrc = open(self.torConfigLocation, 'r') @@ -47,11 +48,15 @@ class NetController: ''' Generate a torrc file for our tor instance ''' - + hsVer = '# v2 onions' + if config.get('tor.v3onions'): + hsVer = 'HiddenServiceVersion 3' + logger.info('Using v3 onions :)') if os.path.exists(self.torConfigLocation): os.remove(self.torConfigLocation) torrcData = '''SocksPort ''' + str(self.socksPort) + ''' HiddenServiceDir data/hs/ +\n''' + hsVer + '''\n HiddenServicePort 80 127.0.0.1:''' + str(self.hsPort) + ''' DataDirectory data/tordata/ ''' @@ -97,10 +102,10 @@ DataDirectory data/tordata/ elif 'Opening Socks listener' in line.decode(): logger.debug(line.decode().replace('\n', '')) else: - logger.fatal('Failed to start Tor. Try killing any other Tor processes owned by this user.') + logger.fatal('Failed to start Tor. 
Maybe a stray instance of Tor used by Onionr is still running?') return False except KeyboardInterrupt: - logger.fatal("Got keyboard interrupt") + logger.fatal("Got keyboard interrupt.") return False logger.debug('Finished starting Tor.', timestamp=True) diff --git a/onionr/onionr.py b/onionr/onionr.py index 914dded9..1736c3f9 100755 --- a/onionr/onionr.py +++ b/onionr/onionr.py @@ -25,7 +25,7 @@ import sys if sys.version_info[0] == 2 or sys.version_info[1] < 5: print('Error, Onionr requires Python 3.4+') sys.exit(1) -import os, base64, random, getpass, shutil, subprocess, requests, time, platform, datetime, re, json, getpass +import os, base64, random, getpass, shutil, subprocess, requests, time, platform, datetime, re, json, getpass, sqlite3 from threading import Thread import api, core, config, logger, onionrplugins as plugins, onionrevents as events import onionrutils @@ -40,9 +40,9 @@ except ImportError: raise Exception("You need the PySocks module (for use with socks5 proxy to use Tor)") ONIONR_TAGLINE = 'Anonymous P2P Platform - GPLv3 - https://Onionr.VoidNet.Tech' -ONIONR_VERSION = '0.1.0' # for debugging and stuff +ONIONR_VERSION = '0.2.0' # for debugging and stuff ONIONR_VERSION_TUPLE = tuple(ONIONR_VERSION.split('.')) # (MAJOR, MINOR, VERSION) -API_VERSION = '3' # increments of 1; only change when something fundemental about how the API works changes. This way other nodes knows how to communicate without learning too much information about you. +API_VERSION = '4' # increments of 1; only change when something fundemental about how the API works changes. This way other nodes know how to communicate without learning too much information about you. class Onionr: def __init__(self): @@ -50,7 +50,6 @@ class Onionr: Main Onionr class. This is for the CLI program, and does not handle much of the logic. In general, external programs and plugins should not use this class. 
''' - try: os.chdir(sys.path[0]) except FileNotFoundError: @@ -92,8 +91,6 @@ class Onionr: self.onionrCore = core.Core() self.onionrUtils = OnionrUtils(self.onionrCore) - self.userOS = platform.system() - # Handle commands self.debug = False # Whole application debugging @@ -138,18 +135,18 @@ class Onionr: self.onionrCore.createAddressDB() # Get configuration - - if not data_exists: - # Generate default config - # Hostname should only be set if different from 127.x.x.x. Important for DNS rebinding attack prevention. - if self.debug: - randomPort = 8080 - else: - while True: - randomPort = random.randint(1024, 65535) - if self.onionrUtils.checkPort(randomPort): - break - config.set('client', {'participate': True, 'hmac': base64.b16encode(os.urandom(32)).decode('utf-8'), 'port': randomPort, 'api_version': API_VERSION}, True) + if type(config.get('client.hmac')) is type(None): + config.set('client.hmac', base64.b16encode(os.urandom(32)).decode('utf-8'), savefile=True) + if type(config.get('client.port')) is type(None): + randomPort = 0 + while randomPort < 1024: + randomPort = self.onionrCore._crypto.secrets.randbelow(65535) + config.set('client.port', randomPort, savefile=True) + if type(config.get('client.participate')) is type(None): + config.set('client.participate', True, savefile=True) + if type(config.get('client.api_version')) is type(None): + config.set('client.api_version', API_VERSION, savefile=True) + self.cmds = { '': self.showHelpSuggestion, @@ -181,21 +178,15 @@ class Onionr: 'listkeys': self.listKeys, 'list-keys': self.listKeys, - 'addmsg': self.addMessage, - 'addmessage': self.addMessage, - 'add-msg': self.addMessage, - 'add-message': self.addMessage, - 'pm': self.sendEncrypt, - - 'getpms': self.getPMs, - 'get-pms': self.getPMs, - 'addpeer': self.addPeer, 'add-peer': self.addPeer, 'add-address': self.addAddress, 'add-addr': self.addAddress, 'addaddr': self.addAddress, 'addaddress': self.addAddress, + 'list-peers': self.listPeers, + + 
'blacklist-block': self.banBlock, 'add-file': self.addFile, 'addfile': self.addFile, @@ -206,8 +197,20 @@ class Onionr: 'introduce': self.onionrCore.introduceNode, 'connect': self.addAddress, + 'kex': self.doKEX, + 'pex': self.doPEX, - 'getpassword': self.getWebPassword + 'ui' : self.openUI, + 'gui' : self.openUI, + + 'getpassword': self.printWebPassword, + 'get-password': self.printWebPassword, + 'getpwd': self.printWebPassword, + 'get-pwd': self.printWebPassword, + 'getpass': self.printWebPassword, + 'get-pass': self.printWebPassword, + 'getpasswd': self.printWebPassword, + 'get-passwd': self.printWebPassword } self.cmdhelp = { @@ -217,19 +220,19 @@ class Onionr: 'start': 'Starts the Onionr daemon', 'stop': 'Stops the Onionr daemon', 'stats': 'Displays node statistics', - 'getpassword': 'Displays the web password', + 'get-password': 'Displays the web password', 'enable-plugin': 'Enables and starts a plugin', 'disable-plugin': 'Disables and stops a plugin', 'reload-plugin': 'Reloads a plugin', 'create-plugin': 'Creates directory structure for a plugin', 'add-peer': 'Adds a peer to database', 'list-peers': 'Displays a list of peers', - 'add-msg': 'Broadcasts a message to the Onionr network', - 'pm': 'Adds a private message to block', - 'get-pms': 'Shows private messages sent to you', 'add-file': 'Create an Onionr block from a file', 'import-blocks': 'import blocks from the disk (Onionr is transport-agnostic!)', 'listconn': 'list connected peers', + 'kex': 'exchange keys with peers (done automatically)', + 'pex': 'exchange addresses with peers (done automatically)', + 'blacklist-block': 'deletes a block by hash and permanently removes it from your node', 'introduce': 'Introduce your node to the public Onionr network', } @@ -258,12 +261,40 @@ class Onionr: def getCommands(self): return self.cmds + def banBlock(self): + try: + ban = sys.argv[2] + except IndexError: + ban = logger.readline('Enter a block hash:') + if self.onionrUtils.validateHash(ban): + if not 
self.onionrCore._blacklist.inBlacklist(ban): + try: + self.onionrCore._blacklist.addToDB(ban) + self.onionrCore.removeBlock(ban) + except Exception as error: + logger.error('Could not blacklist block', error=error) + else: + logger.info('Block blacklisted') + else: + logger.warn('That block is already blacklisted') + else: + logger.error('Invalid block hash') + return + def listConn(self): self.onionrCore.daemonQueueAdd('connectedPeers') + def listPeers(self): + logger.info('Peer transport address list:') + for i in self.onionrCore.listAdders(): + logger.info(i) + def getWebPassword(self): return config.get('client.hmac') + def printWebPassword(self): + print(self.getWebPassword()) + def getHelp(self): return self.cmdhelp @@ -328,31 +359,15 @@ class Onionr: return - def sendEncrypt(self): - ''' - Create a private message and send it - ''' - - invalidID = True - while invalidID: - try: - peer = logger.readline('Peer to send to: ') - except KeyboardInterrupt: - break - else: - if self.onionrUtils.validatePubKey(peer): - invalidID = False - else: - logger.error('Invalid peer ID') - else: - try: - message = logger.readline("Enter a message: ") - except KeyboardInterrupt: - pass - else: - logger.info("Sending message to: " + logger.colors.underline + peer) - self.onionrUtils.sendPM(peer, message) + def doKEX(self): + '''make communicator do kex''' + logger.info('Sending kex to command queue...') + self.onionrCore.daemonQueueAdd('kex') + def doPEX(self): + '''make communicator do pex''' + logger.info('Sending pex to command queue...') + self.onionrCore.daemonQueueAdd('pex') def listKeys(self): ''' @@ -377,7 +392,7 @@ class Onionr: return if not '-' in newPeer: logger.info('Since no POW token was supplied for that key, one is being generated') - proof = onionrproofs.POW(newPeer) + proof = onionrproofs.DataPOW(newPeer) while True: result = proof.getResult() if result == False: @@ -428,19 +443,12 @@ class Onionr: #addedHash = Block(type = 'txt', content = 
messageToAdd).save() addedHash = self.onionrCore.insertBlock(messageToAdd) - if addedHash != None: + if addedHash != None and addedHash != False and addedHash != "": logger.info("Message inserted as as block %s" % addedHash) else: logger.error('Failed to insert block.', timestamp = False) return - def getPMs(self): - ''' - display PMs sent to us - ''' - - self.onionrUtils.loadPMs() - def enablePlugin(self): ''' Enables and starts the given plugin @@ -557,29 +565,37 @@ class Onionr: ''' Starts the Onionr communication daemon ''' - communicatorDaemon = './communicator.py' - if not os.environ.get("WERKZEUG_RUN_MAIN") == "true": - if self._developmentMode: - logger.warn('DEVELOPMENT MODE ENABLED (THIS IS LESS SECURE!)', timestamp = False) - net = NetController(config.get('client.port', 59496)) - logger.info('Tor is starting...') - if not net.startTor(): - sys.exit(1) - logger.info('Started .onion service: ' + logger.colors.underline + net.myID) - logger.info('Our Public key: ' + self.onionrCore._crypto.pubKey) - time.sleep(1) - try: - if config.get('general.newCommunicator', False): - communicatorDaemon = './communicator2.py' - logger.info('Using new communicator') - except NameError: - pass - #TODO make runable on windows - subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)]) - logger.debug('Started communicator') - events.event('daemon_start', onionr = self) - api.API(self.debug) + communicatorDaemon = './communicator2.py' + apiThread = Thread(target=api.API, args=(self.debug,)) + apiThread.start() + try: + time.sleep(3) + except KeyboardInterrupt: + logger.info('Got keyboard interrupt') + time.sleep(1) + self.onionrUtils.localCommand('shutdown') + else: + if apiThread.isAlive(): + if self._developmentMode: + logger.warn('DEVELOPMENT MODE ENABLED (THIS IS LESS SECURE!)', timestamp = False) + net = NetController(config.get('client.port', 59496)) + logger.info('Tor is starting...') + if not net.startTor(): + sys.exit(1) + logger.info('Started .onion 
service: ' + logger.colors.underline + net.myID) + logger.info('Our Public key: ' + self.onionrCore._crypto.pubKey) + time.sleep(1) + #TODO make runable on windows + subprocess.Popen([communicatorDaemon, "run", str(net.socksPort)]) + logger.debug('Started communicator') + events.event('daemon_start', onionr = self) + try: + while True: + time.sleep(5) + except KeyboardInterrupt: + self.onionrCore.daemonQueueAdd('shutdown') + self.onionrUtils.localCommand('shutdown') return def killDaemon(self): @@ -592,10 +608,10 @@ class Onionr: events.event('daemon_stop', onionr = self) net = NetController(config.get('client.port', 59496)) try: - self.onionrUtils.localCommand('shutdown') - except requests.exceptions.ConnectionError: + self.onionrCore.daemonQueueAdd('shutdown') + except sqlite3.OperationalError: pass - self.onionrCore.daemonQueueAdd('shutdown') + net.killTor() except Exception as e: logger.error('Failed to shutdown daemon.', error = e, timestamp = False) @@ -618,6 +634,7 @@ class Onionr: 'Public Key' : self.onionrCore._crypto.pubKey, 'POW Token' : powToken, 'Combined' : self.onionrCore._crypto.pubKey + '-' + powToken, + 'Human readable public key' : self.onionrCore._utils.getHumanReadableID(), 'Node Address' : self.get_hostname(), # file and folder size stats @@ -735,5 +752,12 @@ class Onionr: else: logger.error('%s add-file ' % sys.argv[0], timestamp = False) + def openUI(self): + import webbrowser + url = 'http://127.0.0.1:%s/ui/index.html?timingToken=%s' % (config.get('client.port', 59496), self.onionrUtils.getTimeBypassToken()) -Onionr() + print('Opening %s ...' % url) + webbrowser.open(url, new = 1, autoraise = True) + +if __name__ == "__main__": + Onionr() diff --git a/onionr/onionrblacklist.py b/onionr/onionrblacklist.py new file mode 100644 index 00000000..86823283 --- /dev/null +++ b/onionr/onionrblacklist.py @@ -0,0 +1,115 @@ +''' + Onionr - P2P Microblogging Platform & Social network. 
+ + This file handles maintenence of a blacklist database, for blocks and peers +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' +import sqlite3, os, logger +class OnionrBlackList: + def __init__(self, coreInst): + self.blacklistDB = 'data/blacklist.db' + self._core = coreInst + + if not os.path.exists(self.blacklistDB): + self.generateDB() + return + + def inBlacklist(self, data): + hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data)) + retData = False + if not hashed.isalnum(): + raise Exception("Hashed data is not alpha numeric") + + for i in self._dbExecute("select * from blacklist where hash='%s'" % (hashed,)): + retData = True # this only executes if an entry is present by that hash + break + return retData + + def _dbExecute(self, toExec): + conn = sqlite3.connect(self.blacklistDB) + c = conn.cursor() + retData = c.execute(toExec) + conn.commit() + return retData + + def deleteBeforeDate(self, date): + # TODO, delete blacklist entries before date + return + + def deleteExpired(self, dataType=0): + '''Delete expired entries''' + deleteList = [] + curTime = self._core._utils.getEpoch() + + try: + int(dataType) + except AttributeError: + raise TypeError("dataType must be int") + + for i in self._dbExecute('select * from blacklist where dataType=%s' % (dataType,)): + if i[1] == dataType: + if (curTime - i[2]) >= i[3]: + deleteList.append(i[0]) + + for thing in deleteList: + 
self._dbExecute("delete from blacklist where hash='%s'" % (thing,)) + + def generateDB(self): + self._dbExecute('''CREATE TABLE blacklist( + hash text primary key not null, + dataType int, + blacklistDate int, + expire int + ); + ''') + return + + def clearDB(self): + self._dbExecute('''delete from blacklist;);''') + + def getList(self): + data = self._dbExecute('select * from blacklist') + myList = [] + for i in data: + myList.append(i[0]) + return myList + + def addToDB(self, data, dataType=0, expire=0): + '''Add to the blacklist. Intended to be block hash, block data, peers, or transport addresses + 0=block + 1=peer + 2=pubkey + ''' + # we hash the data so we can remove data entirely from our node's disk + hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data)) + + if self.inBlacklist(hashed): + return + + if not hashed.isalnum(): + raise Exception("Hashed data is not alpha numeric") + try: + int(dataType) + except ValueError: + raise Exception("dataType is not int") + try: + int(expire) + except ValueError: + raise Exception("expire is not int") + #TODO check for length sanity + insert = (hashed,) + blacklistDate = self._core._utils.getEpoch() + self._dbExecute("insert into blacklist (hash, dataType, blacklistDate, expire) VALUES('%s', %s, %s, %s);" % (hashed, dataType, blacklistDate, expire)) diff --git a/onionr/onionrblockapi.py b/onionr/onionrblockapi.py index 90a32b9a..e255c0ae 100644 --- a/onionr/onionrblockapi.py +++ b/onionr/onionrblockapi.py @@ -18,7 +18,7 @@ along with this program. If not, see . 
''' -import core as onionrcore, logger, config +import core as onionrcore, logger, config, onionrexceptions, nacl.exceptions import json, os, sys, datetime, base64 class Block: @@ -28,21 +28,17 @@ class Block: def __init__(self, hash = None, core = None, type = None, content = None): # take from arguments # sometimes people input a bytes object instead of str in `hash` - try: + if (not hash is None) and isinstance(hash, bytes): hash = hash.decode() - except AttributeError: - pass + self.hash = hash self.core = core self.btype = type self.bcontent = content - # initialize variables self.valid = True self.raw = None - self.powHash = None - self.powToken = None self.signed = False self.signature = None self.signedData = None @@ -50,6 +46,10 @@ class Block: self.parent = None self.bheader = {} self.bmetadata = {} + self.isEncrypted = False + self.decrypted = False + self.signer = None + self.validSig = False # handle arguments if self.getCore() is None: @@ -57,13 +57,62 @@ class Block: # update the blocks' contents if it exists if not self.getHash() is None: - if not self.update(): + if not self.core._utils.validateHash(self.hash): + logger.debug('Block hash %s is invalid.' % self.getHash()) + raise onionrexceptions.InvalidHexHash('Block hash is invalid.') + elif not self.update(): logger.debug('Failed to open block %s.' 
% self.getHash()) else: - logger.debug('Did not update block') + pass + #logger.debug('Did not update block.') # logic + def decrypt(self, anonymous = True, encodedData = True): + ''' + Decrypt a block, loading decrypted data into their vars + ''' + if self.decrypted: + return True + retData = False + core = self.getCore() + # decrypt data + if self.getHeader('encryptType') == 'asym': + try: + self.bcontent = core._crypto.pubKeyDecrypt(self.bcontent, anonymous=anonymous, encodedData=encodedData) + bmeta = core._crypto.pubKeyDecrypt(self.bmetadata, anonymous=anonymous, encodedData=encodedData) + try: + bmeta = bmeta.decode() + except AttributeError: + # yet another bytes fix + pass + self.bmetadata = json.loads(bmeta) + self.signature = core._crypto.pubKeyDecrypt(self.signature, anonymous=anonymous, encodedData=encodedData) + self.signer = core._crypto.pubKeyDecrypt(self.signer, anonymous=anonymous, encodedData=encodedData) + self.signedData = json.dumps(self.bmetadata) + self.bcontent.decode() + except nacl.exceptions.CryptoError: + pass + #logger.debug('Could not decrypt block. Either invalid key or corrupted data') + else: + retData = True + self.decrypted = True + else: + logger.warn('symmetric decryption is not yet supported by this API') + return retData + + def verifySig(self): + ''' + Verify if a block's signature is signed by its claimed signer + ''' + core = self.getCore() + + if core._crypto.edVerify(data=self.signedData, key=self.signer, sig=self.signature, encodedData=True): + self.validSig = True + else: + self.validSig = False + return self.validSig + + def update(self, data = None, file = None): ''' Loads data from a block in to the current object. 
@@ -114,14 +163,19 @@ class Block: self.raw = str(blockdata) self.bheader = json.loads(self.getRaw()[:self.getRaw().index('\n')]) self.bcontent = self.getRaw()[self.getRaw().index('\n') + 1:] - self.bmetadata = json.loads(self.getHeader('meta', None)) + if self.bheader['encryptType'] in ('asym', 'sym'): + self.bmetadata = self.getHeader('meta', None) + self.isEncrypted = True + else: + self.bmetadata = json.loads(self.getHeader('meta', None)) self.parent = self.getMetadata('parent', None) self.btype = self.getMetadata('type', None) - self.powHash = self.getMetadata('powHash', None) - self.powToken = self.getMetadata('powToken', None) self.signed = ('sig' in self.getHeader() and self.getHeader('sig') != '') + # TODO: detect if signer is hash of pubkey or not + self.signer = self.getHeader('signer', None) self.signature = self.getHeader('sig', None) - self.signedData = (None if not self.isSigned() else self.getHeader('meta') + '\n' + self.getContent()) + # signed data is jsonMeta + block content (no linebreak) + self.signedData = (None if not self.isSigned() else self.getHeader('meta') + self.getContent()) self.date = self.getCore().getBlockDate(self.getHash()) if not self.getDate() is None: @@ -174,12 +228,14 @@ class Block: else: self.hash = self.getCore().insertBlock(self.getContent(), header = self.getType(), sign = sign) self.update() + return self.getHash() else: logger.warn('Not writing block; it is invalid.') except Exception as e: logger.error('Failed to save block.', error = e, timestamp = False) - return False + + return False # getters @@ -210,7 +266,6 @@ class Block: Outputs: - (str): the type of the block ''' - return self.btype def getRaw(self): @@ -435,7 +490,7 @@ class Block: # static functions - def getBlocks(type = None, signer = None, signed = None, reverse = False, core = None): + def getBlocks(type = None, signer = None, signed = None, parent = None, reverse = False, limit = None, core = None): ''' Returns a list of Block objects based on 
supplied filters @@ -453,6 +508,9 @@ class Block: try: core = (core if not core is None else onionrcore.Core()) + if (not parent is None) and (not isinstance(parent, Block)): + parent = Block(hash = parent, core = core) + relevant_blocks = list() blocks = (core.getBlockList() if type is None else core.getBlocksByType(type)) @@ -467,6 +525,8 @@ class Block: if not signer is None: if isinstance(signer, (str,)): signer = [signer] + if isinstance(signer, (bytes,)): + signer = [signer.decode()] isSigner = False for key in signer: @@ -477,14 +537,23 @@ class Block: if not isSigner: relevant = False - if relevant: + if not parent is None: + blockParent = block.getParent() + + if blockParent is None: + relevant = False + else: + relevant = parent.getHash() == blockParent.getHash() + + if relevant and (limit is None or len(relevant_Blocks) <= int(limit)): relevant_blocks.append(block) + if bool(reverse): relevant_blocks.reverse() return relevant_blocks except Exception as e: - logger.debug(('Failed to get blocks: %s' % str(e)) + logger.parse_error()) + logger.debug('Failed to get blocks.', error = e) return list() @@ -496,7 +565,6 @@ class Block: - child (str/Block): the child Block to be followed - file (str/file): the file to write the content to, instead of returning it - maximumFollows (int): the maximum number of Blocks to follow - ''' # validate data and instantiate Core diff --git a/onionr/onionrcrypto.py b/onionr/onionrcrypto.py index a8b67f22..00c5a604 100644 --- a/onionr/onionrcrypto.py +++ b/onionr/onionrcrypto.py @@ -59,7 +59,7 @@ class OnionrCrypto: with open(self._keyFile, 'w') as keyfile: keyfile.write(self.pubKey + ',' + self.privKey) with open(self.keyPowFile, 'w') as keyPowFile: - proof = onionrproofs.POW(self.pubKey) + proof = onionrproofs.DataPOW(self.pubKey) logger.info('Doing necessary work to insert our public key') while True: time.sleep(0.2) @@ -114,6 +114,11 @@ class OnionrCrypto: '''Encrypt to a public key (Curve25519, taken from base32 Ed25519 
pubkey)''' retVal = '' + try: + pubkey = pubkey.encode() + except AttributeError: + pass + if encodedData: encoding = nacl.encoding.Base64Encoder else: @@ -127,7 +132,11 @@ class OnionrCrypto: elif anonymous: key = nacl.signing.VerifyKey(key=pubkey, encoder=nacl.encoding.Base32Encoder).to_curve25519_public_key() anonBox = nacl.public.SealedBox(key) - retVal = anonBox.encrypt(data.encode(), encoder=encoding) + try: + data = data.encode() + except AttributeError: + pass + retVal = anonBox.encrypt(data, encoder=encoding) return retVal def pubKeyDecrypt(self, data, pubkey='', anonymous=False, encodedData=False): @@ -238,6 +247,10 @@ class OnionrCrypto: return result def sha3Hash(self, data): + try: + data = data.encode() + except AttributeError: + pass hasher = hashlib.sha3_256() hasher.update(data) return hasher.hexdigest() @@ -249,22 +262,22 @@ class OnionrCrypto: pass return nacl.hash.blake2b(data) - def verifyPow(self, blockContent, metadata): + def verifyPow(self, blockContent): ''' Verifies the proof of work associated with a block ''' retData = False - if not 'powRandomToken' in metadata: - logger.warn('No powRandomToken') - return False - dataLen = len(blockContent) - expectedHash = self.blake2bHash(base64.b64decode(metadata['powRandomToken']) + self.blake2bHash(blockContent.encode())) - difficulty = 0 try: - expectedHash = expectedHash.decode() + blockContent = blockContent.encode() + except AttributeError: + pass + + blockHash = self.sha3Hash(blockContent) + try: + blockHash = blockHash.decode() # bytes on some versions for some reason except AttributeError: pass @@ -273,7 +286,7 @@ class OnionrCrypto: mainHash = '0000000000000000000000000000000000000000000000000000000000000000'#nacl.hash.blake2b(nacl.utils.random()).decode() puzzle = mainHash[:difficulty] - if metadata['powRandomToken'][:difficulty] == puzzle: + if blockHash[:difficulty] == puzzle: # logger.debug('Validated block pow') retData = True else: diff --git a/onionr/onionrdaemontools.py 
b/onionr/onionrdaemontools.py new file mode 100644 index 00000000..8410cb80 --- /dev/null +++ b/onionr/onionrdaemontools.py @@ -0,0 +1,56 @@ +''' + Onionr - P2P Microblogging Platform & Social network. + + Contains the CommunicatorUtils class which contains useful functions for the communicator daemon +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' +import onionrexceptions, onionrpeers, onionrproofs, base64, logger +class DaemonTools: + def __init__(self, daemon): + self.daemon = daemon + self.announceCache = {} + + def announceNode(self): + '''Announce our node to our peers''' + retData = False + + # Announce to random online peers + for i in self.daemon.onlinePeers: + if not i in self.announceCache: + peer = i + break + else: + peer = self.daemon.pickOnlinePeer() + + ourID = self.daemon._core.hsAddress.strip() + + url = 'http://' + peer + '/public/announce/' + data = {'node': ourID} + + combinedNodes = ourID + peer + + if peer in self.announceCache: + data['random'] = self.announceCache[peer] + else: + proof = onionrproofs.DataPOW(combinedNodes, forceDifficulty=4) + data['random'] = base64.b64encode(proof.waitForResult()[1]) + self.announceCache[peer] = data['random'] + + logger.info('Announcing node to ' + url) + if self.daemon._core._utils.doPostRequest(url, data) == 'Success': + retData = True + self.daemon.decrementThreadCount('announceNode') + return retData \ No newline at end of file diff --git 
a/onionr/onionrevents.py b/onionr/onionrevents.py index 9ecc552f..26fdc093 100644 --- a/onionr/onionrevents.py +++ b/onionr/onionrevents.py @@ -33,10 +33,10 @@ def __event_caller(event_name, data = {}, onionr = None): try: call(plugins.get_plugin(plugin), event_name, data, get_pluginapi(onionr, data)) except ModuleNotFoundError as e: - logger.warn('Disabling nonexistant plugin \"' + plugin + '\"...') + logger.warn('Disabling nonexistant plugin "%s"...' % plugin) plugins.disable(plugin, onionr, stop_event = False) except Exception as e: - logger.warn('Event \"' + event_name + '\" failed for plugin \"' + plugin + '\".') + logger.warn('Event "%s" failed for plugin "%s".' % (event_name, plugin)) logger.debug(str(e)) diff --git a/onionr/onionrexceptions.py b/onionr/onionrexceptions.py index dc6485a1..b26a97d7 100644 --- a/onionr/onionrexceptions.py +++ b/onionr/onionrexceptions.py @@ -26,6 +26,10 @@ class Unknown(Exception): class Invalid(Exception): pass +# communicator exceptions +class OnlinePeerNeeded(Exception): + pass + # crypto exceptions class InvalidPubkey(Exception): pass @@ -34,8 +38,23 @@ class InvalidPubkey(Exception): class InvalidMetadata(Exception): pass +class BlacklistedBlock(Exception): + pass + +class DataExists(Exception): + pass + +class InvalidHexHash(Exception): + '''When a string is not a valid hex string of appropriate length for a hash value''' + pass + +class InvalidProof(Exception): + '''When a proof is invalid or inadequate''' + pass + # network level exceptions class MissingPort(Exception): pass + class InvalidAddress(Exception): pass diff --git a/onionr/onionrpeers.py b/onionr/onionrpeers.py index b6ed72ec..710f698d 100644 --- a/onionr/onionrpeers.py +++ b/onionr/onionrpeers.py @@ -1,7 +1,7 @@ ''' Onionr - P2P Microblogging Platform & Social network. 
- This file contains both the OnionrCommunicate class for communcating with peers + This file contains both the PeerProfiles class for network profiling of Onionr nodes ''' ''' This program is free software: you can redistribute it and/or modify @@ -16,4 +16,84 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . -''' \ No newline at end of file +''' +import core, config, logger, sqlite3 +class PeerProfiles: + ''' + PeerProfiles + ''' + def __init__(self, address, coreInst): + self.address = address # node address + self.score = None + self.friendSigCount = 0 + self.success = 0 + self.failure = 0 + + if not isinstance(coreInst, core.Core): + raise TypeError("coreInst must be a type of core.Core") + self.coreInst = coreInst + assert isinstance(self.coreInst, core.Core) + + self.loadScore() + return + + def loadScore(self): + '''Load the node's score from the database''' + try: + self.success = int(self.coreInst.getAddressInfo(self.address, 'success')) + except (TypeError, ValueError) as e: + self.success = 0 + self.score = self.success + + def saveScore(self): + '''Save the node's score to the database''' + self.coreInst.setAddressInfo(self.address, 'success', self.score) + return + + def addScore(self, toAdd): + '''Add to the peer's score (can add negative)''' + self.score += toAdd + self.saveScore() + +def getScoreSortedPeerList(coreInst): + if not type(coreInst is core.Core): + raise TypeError('coreInst must be instance of core.Core') + + peerList = coreInst.listAdders() + peerScores = {} + + for address in peerList: + # Load peer's profiles into a list + profile = PeerProfiles(address, coreInst) + peerScores[address] = profile.score + + # Sort peers by their score, greatest to least + peerList = sorted(peerScores, key=peerScores.get, reverse=True) + return peerList + +def peerCleanup(coreInst): + '''Removes peers who have been offline too long or score too low''' + if not type(coreInst is core.Core): + raise 
TypeError('coreInst must be instance of core.Core') + + logger.info('Cleaning peers...') + config.reload() + + minScore = int(config.get('peers.minimumScore')) + maxPeers = int(config.get('peers.maxStoredPeers')) + + adders = getScoreSortedPeerList(coreInst) + adders.reverse() + + for address in adders: + # Remove peers that go below the negative score + if PeerProfiles(address, coreInst).score < minScore: + coreInst.removeAddress(address) + try: + coreInst._blacklist.addToDB(address, dataType=1, expire=300) + except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue + pass + logger.warn('Removed address ' + address + '.') + + # Unban probably not malicious peers TODO improve + coreInst._blacklist.deleteExpired(dataType=1) \ No newline at end of file diff --git a/onionr/onionrpluginapi.py b/onionr/onionrpluginapi.py index bfaf73e8..0120dad7 100644 --- a/onionr/onionrpluginapi.py +++ b/onionr/onionrpluginapi.py @@ -130,6 +130,22 @@ class CommandAPI: def get_commands(self): return self.pluginapi.get_onionr().getCommands() +class WebAPI: + def __init__(self, pluginapi): + self.pluginapi = pluginapi + + def register_callback(self, action, callback, scope = 'public'): + return self.pluginapi.get_onionr().api.setCallback(action, callback, scope = scope) + + def unregister_callback(self, action, scope = 'public'): + return self.pluginapi.get_onionr().api.removeCallback(action, scope = scope) + + def get_callback(self, action, scope = 'public'): + return self.pluginapi.get_onionr().api.getCallback(action, scope= scope) + + def get_callbacks(self, scope = None): + return self.pluginapi.get_onionr().api.getCallbacks(scope = scope) + class pluginapi: def __init__(self, onionr, data): self.onionr = onionr @@ -142,6 +158,7 @@ class pluginapi: self.daemon = DaemonAPI(self) self.plugins = PluginAPI(self) self.commands = CommandAPI(self) + self.web = WebAPI(self) def get_onionr(self): return self.onionr @@ -167,5 +184,8 @@ class pluginapi: def 
get_commandapi(self): return self.commands + def get_webapi(self): + return self.web + def is_development_mode(self): return self.get_onionr()._developmentMode diff --git a/onionr/onionrplugins.py b/onionr/onionrplugins.py index 6160838e..a699433c 100644 --- a/onionr/onionrplugins.py +++ b/onionr/onionrplugins.py @@ -63,15 +63,17 @@ def enable(name, onionr = None, start_event = True): if exists(name): enabled_plugins = get_enabled_plugins() if not name in enabled_plugins: - enabled_plugins.append(name) - config.set('plugins.enabled', enabled_plugins, True) - - events.call(get_plugin(name), 'enable', onionr) - - if start_event is True: - start(name) - - return True + try: + events.call(get_plugin(name), 'enable', onionr) + except ImportError: # Was getting import error on Gitlab CI test "data" + return False + else: + enabled_plugins.append(name) + config.set('plugins.enabled', enabled_plugins, True) + + if start_event is True: + start(name) + return True else: return False else: diff --git a/onionr/onionrproofs.py b/onionr/onionrproofs.py index 7f65bff7..b93d5724 100644 --- a/onionr/onionrproofs.py +++ b/onionr/onionrproofs.py @@ -18,20 +18,23 @@ along with this program. If not, see . 
''' -import nacl.encoding, nacl.hash, nacl.utils, time, math, threading, binascii, logger, sys, base64 +import nacl.encoding, nacl.hash, nacl.utils, time, math, threading, binascii, logger, sys, base64, json import core -class POW: - def __init__(self, data, threadCount = 5): +class DataPOW: + def __init__(self, data, forceDifficulty=0, threadCount = 5): self.foundHash = False self.difficulty = 0 self.data = data self.threadCount = threadCount - dataLen = sys.getsizeof(data) - self.difficulty = math.floor(dataLen / 1000000) - if self.difficulty <= 2: - self.difficulty = 4 + if forceDifficulty == 0: + dataLen = sys.getsizeof(data) + self.difficulty = math.floor(dataLen / 1000000) + if self.difficulty <= 2: + self.difficulty = 4 + else: + self.difficulty = forceDifficulty try: self.data = self.data.encode() @@ -97,6 +100,105 @@ class POW: self.result = False return retVal + def waitForResult(self): + ''' + Returns the result only when it has been found, False if not running and not found + ''' + result = False + try: + while True: + result = self.getResult() + if not self.hashing: + break + else: + time.sleep(2) + except KeyboardInterrupt: + self.shutdown() + logger.warn('Got keyboard interrupt while waiting for POW result, stopping') + return result + +class POW: + def __init__(self, metadata, data, threadCount = 5): + self.foundHash = False + self.difficulty = 0 + self.data = data + self.metadata = metadata + self.threadCount = threadCount + + dataLen = len(data) + len(json.dumps(metadata)) + self.difficulty = math.floor(dataLen / 1000000) + if self.difficulty <= 2: + self.difficulty = 4 + + try: + self.data = self.data.encode() + except AttributeError: + pass + + logger.info('Computing POW (difficulty: %s)...' 
% self.difficulty) + + self.mainHash = '0' * 64 + self.puzzle = self.mainHash[0:min(self.difficulty, len(self.mainHash))] + + myCore = core.Core() + for i in range(max(1, threadCount)): + t = threading.Thread(name = 'thread%s' % i, target = self.pow, args = (True,myCore)) + t.start() + + return + + def pow(self, reporting = False, myCore = None): + startTime = math.floor(time.time()) + self.hashing = True + self.reporting = reporting + iFound = False # if current thread is the one that found the answer + answer = '' + heartbeat = 200000 + hbCount = 0 + + while self.hashing: + rand = nacl.utils.random() + #token = nacl.hash.blake2b(rand + self.data).decode() + self.metadata['powRandomToken'] = base64.b64encode(rand).decode() + payload = json.dumps(self.metadata).encode() + b'\n' + self.data + token = myCore._crypto.sha3Hash(payload) + try: + # on some versions, token is bytes + token = token.decode() + except AttributeError: + pass + if self.puzzle == token[0:self.difficulty]: + self.hashing = False + iFound = True + self.result = payload + break + + if iFound: + endTime = math.floor(time.time()) + if self.reporting: + logger.debug('Found token after %s seconds: %s' % (endTime - startTime, token), timestamp=True) + logger.debug('Random value was: %s' % base64.b64encode(rand).decode()) + + def shutdown(self): + self.hashing = False + self.puzzle = '' + + def changeDifficulty(self, newDiff): + self.difficulty = newDiff + + def getResult(self): + ''' + Returns the result then sets to false, useful to automatically clear the result + ''' + + try: + retVal = self.result + except AttributeError: + retVal = False + + self.result = False + return retVal + def waitForResult(self): ''' Returns the result only when it has been found, False if not running and not found diff --git a/onionr/onionrutils.py b/onionr/onionrutils.py index 84225b04..6d22992c 100644 --- a/onionr/onionrutils.py +++ b/onionr/onionrutils.py @@ -18,12 +18,12 @@ along with this program. If not, see . 
''' # Misc functions that do not fit in the main api, but are useful -import getpass, sys, requests, os, socket, hashlib, logger, sqlite3, config, binascii, time, base64, json, glob, shutil, math, json +import getpass, sys, requests, os, socket, hashlib, logger, sqlite3, config, binascii, time, base64, json, glob, shutil, math, json, re import nacl.signing, nacl.encoding from onionrblockapi import Block import onionrexceptions from defusedxml import minidom - +import pgpwords if sys.version_info < (3, 6): try: import sha3 @@ -33,7 +33,7 @@ if sys.version_info < (3, 6): class OnionrUtils: ''' - Various useful function + Various useful functions for validating things, etc functions, connectivity ''' def __init__(self, coreInstance): self.fingerprintFile = 'data/own-fingerprint.txt' @@ -41,6 +41,9 @@ class OnionrUtils: self.timingToken = '' + self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions + self.peerProcessing = {} # dict of current peer actions: peer, actionList + config.reload() return def getTimeBypassToken(self): @@ -49,43 +52,16 @@ class OnionrUtils: with open('data/time-bypass.txt', 'r') as bypass: self.timingToken = bypass.read() except Exception as error: - logger.error('Failed to fetch time bypass token.', error=error) + logger.error('Failed to fetch time bypass token.', error = error) - def sendPM(self, pubkey, message): + return self.timingToken + + def getRoundedEpoch(self, roundS=60): ''' - High level function to encrypt a message to a peer and insert it as a block - ''' - - try: - # We sign PMs here rather than in core.insertBlock in order to mask the sender's pubkey - payload = {'sig': '', 'msg': '', 'id': self._core._crypto.pubKey} - - sign = self._core._crypto.edSign(message, self._core._crypto.privKey, encodeResult=True) - #encrypted = self._core._crypto.pubKeyEncrypt(message, pubkey, anonymous=True, encodedData=True).decode() - - payload['sig'] = sign - payload['msg'] = message - payload = 
json.dumps(payload) - message = payload - encrypted = self._core._crypto.pubKeyEncrypt(message, pubkey, anonymous=True, encodedData=True).decode() - - - block = self._core.insertBlock(encrypted, header='pm', sign=False) - if block == '': - logger.error('Could not send PM') - else: - logger.info('Sent PM, hash: %s' % block) - except Exception as error: - logger.error('Failed to send PM.', error=error) - - return - - def getCurrentHourEpoch(self): - ''' - Returns the current epoch, rounded down to the hour + Returns the epoch, rounded down to given seconds (Default 60) ''' epoch = self.getEpoch() - return epoch - (epoch % 3600) + return epoch - (epoch % roundS) def incrementAddressSuccess(self, address): ''' @@ -134,7 +110,8 @@ class OnionrUtils: else: logger.warn("Failed to add key") else: - logger.warn('%s pow failed' % key[0]) + pass + #logger.debug('%s pow failed' % key[0]) return retVal except Exception as error: logger.error('Failed to merge keys.', error=error) @@ -149,12 +126,16 @@ class OnionrUtils: retVal = False if newAdderList != False: for adder in newAdderList.split(','): - if not adder in self._core.listAdders(randomOrder = False) and adder.strip() != self.getMyAddress(): + adder = adder.strip() + if not adder in self._core.listAdders(randomOrder = False) and adder != self.getMyAddress() and not self._core._blacklist.inBlacklist(adder): + if not config.get('tor.v3onions') and len(adder) == 62: + continue if self._core.addAddress(adder): logger.info('Added %s to db.' % adder, timestamp = True) retVal = True else: - logger.debug('%s is either our address or already in our DB' % adder) + pass + #logger.debug('%s is either our address or already in our DB' % adder) return retVal except Exception as error: logger.error('Failed to merge adders.', error = error) @@ -176,14 +157,17 @@ class OnionrUtils: config.reload() self.getTimeBypassToken() # TODO: URL encode parameters, just as an extra measure. May not be needed, but should be added regardless. 
- with open('data/host.txt', 'r') as host: - hostname = host.read() + try: + with open('data/host.txt', 'r') as host: + hostname = host.read() + except FileNotFoundError: + return False payload = 'http://%s:%s/client/?action=%s&token=%s&timingToken=%s' % (hostname, config.get('client.port'), command, config.get('client.hmac'), self.timingToken) try: retData = requests.get(payload).text except Exception as error: if not silent: - logger.error('Failed to make local request (command: %s).' % command, error=error) + logger.error('Failed to make local request (command: %s):%s' % (command, error)) retData = False return retData @@ -209,20 +193,39 @@ class OnionrUtils: return pass1 + def getHumanReadableID(self, pub=''): + '''gets a human readable ID from a public key''' + if pub == '': + pub = self._core._crypto.pubKey + pub = base64.b16encode(base64.b32decode(pub)).decode() + return '-'.join(pgpwords.wordify(pub)) + def getBlockMetadataFromData(self, blockData): ''' - accepts block contents as string and returns a tuple of metadata, meta (meta being internal metadata) + accepts block contents as string, returns a tuple of metadata, meta (meta being internal metadata, which will be returned as an encrypted base64 string if it is encrypted, dict if not). 
+ ''' + meta = {} + metadata = {} + data = blockData try: blockData = blockData.encode() except AttributeError: pass - metadata = json.loads(blockData[:blockData.find(b'\n')].decode()) - data = blockData[blockData.find(b'\n'):].decode() + try: - meta = json.loads(metadata['meta']) - except KeyError: - meta = {} + metadata = json.loads(blockData[:blockData.find(b'\n')].decode()) + except json.decoder.JSONDecodeError: + pass + else: + data = blockData[blockData.find(b'\n'):].decode() + + if not metadata['encryptType'] in ('asym', 'sym'): + try: + meta = json.loads(metadata['meta']) + except KeyError: + pass + meta = metadata['meta'] return (metadata, meta, data) def checkPort(self, port, host=''): @@ -253,6 +256,29 @@ class OnionrUtils: else: return True + def processBlockMetadata(self, blockHash): + ''' + Read metadata from a block and cache it to the block database + ''' + myBlock = Block(blockHash, self._core) + if myBlock.isEncrypted: + myBlock.decrypt() + blockType = myBlock.getMetadata('type') # we would use myBlock.getType() here, but it is bugged with encrypted blocks + try: + if len(blockType) <= 10: + self._core.updateBlockInfo(blockHash, 'dataType', blockType) + except TypeError: + pass + + def escapeAnsi(self, line): + ''' + Remove ANSI escape codes from a string with regex + + taken or adapted from: https://stackoverflow.com/a/38662876 + ''' + ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]') + return ansi_escape.sub('', line) + def getBlockDBHash(self): ''' Return a sha3_256 hash of the blocks DB @@ -309,12 +335,12 @@ class OnionrUtils: retVal = False return retVal - - def validateMetadata(metadata): + + def validateMetadata(self, metadata, blockData): '''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string''' # TODO, make this check sane sizes retData = False - + # convert to dict if it is json string if type(metadata) is str: try: @@ -334,9 +360,30 @@ class OnionrUtils: if 
self._core.requirements.blockMetadataLengths[i] < len(metadata[i]): logger.warn('Block metadata key ' + i + ' exceeded maximum size') break + if i == 'time': + if not self.isIntegerString(metadata[i]): + logger.warn('Block metadata time stamp is not integer string') + break else: # if metadata loop gets no errors, it does not break, therefore metadata is valid - retData = True + # make sure we do not have another block with the same data content (prevent data duplication and replay attacks) + nonce = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(blockData)) + try: + with open(self._core.dataNonceFile, 'r') as nonceFile: + if nonce in nonceFile.read(): + retData = False # we've seen that nonce before, so we can't pass metadata + raise onionrexceptions.DataExists + except FileNotFoundError: + retData = True + except onionrexceptions.DataExists: + # do not set retData to True, because nonce has been seen before + pass + else: + retData = True + if retData: + # Executes if data not seen + with open(self._core.dataNonceFile, 'a') as nonceFile: + nonceFile.write(nonce + '\n') else: logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object') @@ -357,6 +404,14 @@ class OnionrUtils: retVal = True return retVal + def isIntegerString(self, data): + '''Check if a string is a valid base10 integer''' + try: + int(data) + except ValueError: + return False + else: + return True def validateID(self, id): ''' @@ -405,52 +460,6 @@ class OnionrUtils: except: return False - def loadPMs(self): - ''' - Find, decrypt, and return array of PMs (array of dictionary, {from, text}) - ''' - blocks = Block.getBlocks(type = 'pm', core = self._core) - message = '' - sender = '' - for i in blocks: - try: - blockContent = i.getContent() - - try: - message = self._core._crypto.pubKeyDecrypt(blockContent, encodedData=True, anonymous=True) - except nacl.exceptions.CryptoError as e: - pass - else: - try: - message = message.decode() - except 
AttributeError: - pass - - try: - message = json.loads(message) - except json.decoder.JSONDecodeError: - pass - else: - logger.debug('Decrypted %s:' % i.getHash()) - logger.info(message["msg"]) - - signer = message["id"] - sig = message["sig"] - - if self.validatePubKey(signer): - if self._core._crypto.edVerify(message["msg"], signer, sig, encodedData=True): - logger.info("Good signature by %s" % signer) - else: - logger.warn("Bad signature by %s" % signer) - else: - logger.warn('Bad sender id: %s' % signer) - - except FileNotFoundError: - pass - except Exception as error: - logger.error('Failed to open block %s.' % i, error=error) - return - def getPeerByHashId(self, hash): ''' Return the pubkey of the user if known from the hash @@ -536,29 +545,58 @@ class OnionrUtils: '''returns epoch''' return math.floor(time.time()) - def doGetRequest(self, url, port=0, proxyType='tor'): + def doPostRequest(self, url, data={}, port=0, proxyType='tor'): ''' - Do a get request through a local tor or i2p instance + Do a POST request through a local tor or i2p instance ''' if proxyType == 'tor': if port == 0: - raise onionrexceptions.MissingPort('Socks port required for Tor HTTP get request') - proxies = {'http': 'socks5://127.0.0.1:' + str(port), 'https': 'socks5://127.0.0.1:' + str(port)} + port = self._core.torPort + proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)} elif proxyType == 'i2p': proxies = {'http': 'http://127.0.0.1:4444'} else: return headers = {'user-agent': 'PyOnionr'} try: - proxies = {'http': 'socks5h://127.0.0.1:' + str(port), 'https': 'socks5h://127.0.0.1:' + str(port)} - r = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=(15, 30)) + proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)} + r = requests.post(url, data=data, headers=headers, proxies=proxies, allow_redirects=False, timeout=(15, 30)) retData = r.text + except 
KeyboardInterrupt: + raise KeyboardInterrupt except requests.exceptions.RequestException as e: logger.debug('Error: %s' % str(e)) retData = False return retData - def getNistBeaconSalt(self, torPort=0): + def doGetRequest(self, url, port=0, proxyType='tor'): + ''' + Do a get request through a local tor or i2p instance + ''' + retData = False + if proxyType == 'tor': + if port == 0: + raise onionrexceptions.MissingPort('Socks port required for Tor HTTP get request') + proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)} + elif proxyType == 'i2p': + proxies = {'http': 'http://127.0.0.1:4444'} + else: + return + headers = {'user-agent': 'PyOnionr'} + try: + proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)} + r = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False, timeout=(15, 30)) + retData = r.text + except KeyboardInterrupt: + raise KeyboardInterrupt + except ValueError as e: + logger.debug('Failed to make request', error = e) + except requests.exceptions.RequestException as e: + logger.debug('Error: %s' % str(e)) + retData = False + return retData + + def getNistBeaconSalt(self, torPort=0, rounding=3600): ''' Get the token for the current hour from the NIST randomness beacon ''' @@ -568,7 +606,7 @@ class OnionrUtils: except IndexError: raise onionrexceptions.MissingPort('Missing Tor socks port') retData = '' - curTime = self._core._utils.getCurrentHourEpoch + curTime = self.getRoundedEpoch(rounding) self.nistSaltTimestamp = curTime data = self.doGetRequest('https://beacon.nist.gov/rest/record/' + str(curTime), port=torPort) dataXML = minidom.parseString(data, forbid_dtd=True, forbid_entities=True, forbid_external=True) @@ -579,6 +617,19 @@ class OnionrUtils: else: self.powSalt = retData return retData + + def strToBytes(self, data): + try: + data = data.encode() + except AttributeError: + pass + return data + def bytesToStr(self, data): + try: + 
data = data.decode() + except AttributeError: + pass + return data def size(path='.'): ''' diff --git a/onionr/onionrvalues.py b/onionr/onionrvalues.py index b9fd4a2b..3f806702 100644 --- a/onionr/onionrvalues.py +++ b/onionr/onionrvalues.py @@ -21,4 +21,4 @@ class OnionrValues: def __init__(self): self.passwordLength = 20 - self.blockMetadataLengths = {'meta': 1000, 'sig': 88, 'signer': 64, 'time': 10, 'powRandomToken': '1000'} \ No newline at end of file + self.blockMetadataLengths = {'meta': 1000, 'sig': 200, 'signer': 200, 'time': 10, 'powRandomToken': 1000, 'encryptType': 4} #TODO properly refine values to minimum needed \ No newline at end of file diff --git a/onionr/pgpwords.py b/onionr/pgpwords.py new file mode 100644 index 00000000..6183eba9 --- /dev/null +++ b/onionr/pgpwords.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- (because 0xFF, even : "Yucatán") + +import os, re, sys + +_words = [ + ["aardvark", "adroitness"], + ["absurd", "adviser"], + ["accrue", "aftermath"], + ["acme", "aggregate"], + ["adrift", "alkali"], + ["adult", "almighty"], + ["afflict", "amulet"], + ["ahead", "amusement"], + ["aimless", "antenna"], + ["Algol", "applicant"], + ["allow", "Apollo"], + ["alone", "armistice"], + ["ammo", "article"], + ["ancient", "asteroid"], + ["apple", "Atlantic"], + ["artist", "atmosphere"], + ["assume", "autopsy"], + ["Athens", "Babylon"], + ["atlas", "backwater"], + ["Aztec", "barbecue"], + ["baboon", "belowground"], + ["backfield", "bifocals"], + ["backward", "bodyguard"], + ["banjo", "bookseller"], + ["beaming", "borderline"], + ["bedlamp", "bottomless"], + ["beehive", "Bradbury"], + ["beeswax", "bravado"], + ["befriend", "Brazilian"], + ["Belfast", "breakaway"], + ["berserk", "Burlington"], + ["billiard", "businessman"], + ["bison", "butterfat"], + ["blackjack", "Camelot"], + ["blockade", "candidate"], + ["blowtorch", "cannonball"], + ["bluebird", "Capricorn"], + ["bombast", "caravan"], + ["bookshelf", "caretaker"], + 
["brackish", "celebrate"], + ["breadline", "cellulose"], + ["breakup", "certify"], + ["brickyard", "chambermaid"], + ["briefcase", "Cherokee"], + ["Burbank", "Chicago"], + ["button", "clergyman"], + ["buzzard", "coherence"], + ["cement", "combustion"], + ["chairlift", "commando"], + ["chatter", "company"], + ["checkup", "component"], + ["chisel", "concurrent"], + ["choking", "confidence"], + ["chopper", "conformist"], + ["Christmas", "congregate"], + ["clamshell", "consensus"], + ["classic", "consulting"], + ["classroom", "corporate"], + ["cleanup", "corrosion"], + ["clockwork", "councilman"], + ["cobra", "crossover"], + ["commence", "crucifix"], + ["concert", "cumbersome"], + ["cowbell", "customer"], + ["crackdown", "Dakota"], + ["cranky", "decadence"], + ["crowfoot", "December"], + ["crucial", "decimal"], + ["crumpled", "designing"], + ["crusade", "detector"], + ["cubic", "detergent"], + ["dashboard", "determine"], + ["deadbolt", "dictator"], + ["deckhand", "dinosaur"], + ["dogsled", "direction"], + ["dragnet", "disable"], + ["drainage", "disbelief"], + ["dreadful", "disruptive"], + ["drifter", "distortion"], + ["dropper", "document"], + ["drumbeat", "embezzle"], + ["drunken", "enchanting"], + ["Dupont", "enrollment"], + ["dwelling", "enterprise"], + ["eating", "equation"], + ["edict", "equipment"], + ["egghead", "escapade"], + ["eightball", "Eskimo"], + ["endorse", "everyday"], + ["endow", "examine"], + ["enlist", "existence"], + ["erase", "exodus"], + ["escape", "fascinate"], + ["exceed", "filament"], + ["eyeglass", "finicky"], + ["eyetooth", "forever"], + ["facial", "fortitude"], + ["fallout", "frequency"], + ["flagpole", "gadgetry"], + ["flatfoot", "Galveston"], + ["flytrap", "getaway"], + ["fracture", "glossary"], + ["framework", "gossamer"], + ["freedom", "graduate"], + ["frighten", "gravity"], + ["gazelle", "guitarist"], + ["Geiger", "hamburger"], + ["glitter", "Hamilton"], + ["glucose", "handiwork"], + ["goggles", "hazardous"], + ["goldfish", 
"headwaters"], + ["gremlin", "hemisphere"], + ["guidance", "hesitate"], + ["hamlet", "hideaway"], + ["highchair", "holiness"], + ["hockey", "hurricane"], + ["indoors", "hydraulic"], + ["indulge", "impartial"], + ["inverse", "impetus"], + ["involve", "inception"], + ["island", "indigo"], + ["jawbone", "inertia"], + ["keyboard", "infancy"], + ["kickoff", "inferno"], + ["kiwi", "informant"], + ["klaxon", "insincere"], + ["locale", "insurgent"], + ["lockup", "integrate"], + ["merit", "intention"], + ["minnow", "inventive"], + ["miser", "Istanbul"], + ["Mohawk", "Jamaica"], + ["mural", "Jupiter"], + ["music", "leprosy"], + ["necklace", "letterhead"], + ["Neptune", "liberty"], + ["newborn", "maritime"], + ["nightbird", "matchmaker"], + ["Oakland", "maverick"], + ["obtuse", "Medusa"], + ["offload", "megaton"], + ["optic", "microscope"], + ["orca", "microwave"], + ["payday", "midsummer"], + ["peachy", "millionaire"], + ["pheasant", "miracle"], + ["physique", "misnomer"], + ["playhouse", "molasses"], + ["Pluto", "molecule"], + ["preclude", "Montana"], + ["prefer", "monument"], + ["preshrunk", "mosquito"], + ["printer", "narrative"], + ["prowler", "nebula"], + ["pupil", "newsletter"], + ["puppy", "Norwegian"], + ["python", "October"], + ["quadrant", "Ohio"], + ["quiver", "onlooker"], + ["quota", "opulent"], + ["ragtime", "Orlando"], + ["ratchet", "outfielder"], + ["rebirth", "Pacific"], + ["reform", "pandemic"], + ["regain", "Pandora"], + ["reindeer", "paperweight"], + ["rematch", "paragon"], + ["repay", "paragraph"], + ["retouch", "paramount"], + ["revenge", "passenger"], + ["reward", "pedigree"], + ["rhythm", "Pegasus"], + ["ribcage", "penetrate"], + ["ringbolt", "perceptive"], + ["robust", "performance"], + ["rocker", "pharmacy"], + ["ruffled", "phonetic"], + ["sailboat", "photograph"], + ["sawdust", "pioneer"], + ["scallion", "pocketful"], + ["scenic", "politeness"], + ["scorecard", "positive"], + ["Scotland", "potato"], + ["seabird", "processor"], + ["select", 
"provincial"], + ["sentence", "proximate"], + ["shadow", "puberty"], + ["shamrock", "publisher"], + ["showgirl", "pyramid"], + ["skullcap", "quantity"], + ["skydive", "racketeer"], + ["slingshot", "rebellion"], + ["slowdown", "recipe"], + ["snapline", "recover"], + ["snapshot", "repellent"], + ["snowcap", "replica"], + ["snowslide", "reproduce"], + ["solo", "resistor"], + ["southward", "responsive"], + ["soybean", "retraction"], + ["spaniel", "retrieval"], + ["spearhead", "retrospect"], + ["spellbind", "revenue"], + ["spheroid", "revival"], + ["spigot", "revolver"], + ["spindle", "sandalwood"], + ["spyglass", "sardonic"], + ["stagehand", "Saturday"], + ["stagnate", "savagery"], + ["stairway", "scavenger"], + ["standard", "sensation"], + ["stapler", "sociable"], + ["steamship", "souvenir"], + ["sterling", "specialist"], + ["stockman", "speculate"], + ["stopwatch", "stethoscope"], + ["stormy", "stupendous"], + ["sugar", "supportive"], + ["surmount", "surrender"], + ["suspense", "suspicious"], + ["sweatband", "sympathy"], + ["swelter", "tambourine"], + ["tactics", "telephone"], + ["talon", "therapist"], + ["tapeworm", "tobacco"], + ["tempest", "tolerance"], + ["tiger", "tomorrow"], + ["tissue", "torpedo"], + ["tonic", "tradition"], + ["topmost", "travesty"], + ["tracker", "trombonist"], + ["transit", "truncated"], + ["trauma", "typewriter"], + ["treadmill", "ultimate"], + ["Trojan", "undaunted"], + ["trouble", "underfoot"], + ["tumor", "unicorn"], + ["tunnel", "unify"], + ["tycoon", "universe"], + ["uncut", "unravel"], + ["unearth", "upcoming"], + ["unwind", "vacancy"], + ["uproot", "vagabond"], + ["upset", "vertigo"], + ["upshot", "Virginia"], + ["vapor", "visitor"], + ["village", "vocalist"], + ["virus", "voyager"], + ["Vulcan", "warranty"], + ["waffle", "Waterloo"], + ["wallet", "whimsical"], + ["watchword", "Wichita"], + ["wayside", "Wilmington"], + ["willow", "Wyoming"], + ["woodlark", "yesteryear"], + ["Zulu", "Yucatán"]] + +hexre = re.compile("[a-fA-F0-9]+") + 
+def wordify(seq): + seq = filter(lambda x: x not in (' ', '\n', '\t'), seq) + seq = "".join(seq) # Python3 compatibility + + if not hexre.match(seq): + raise Exception("Input is not a valid hexadecimal value.") + + if len(seq) % 2: + raise Exception("Input contains an odd number of bytes.") + + ret = [] + for i in range(0, len(seq), 2): + ret.append(_words[int(seq[i:i+2], 16)][(i//2)%2]) + return ret + +def usage(): + print("Usage:") + print(" {0} [fingerprint...]".format(os.path.basename(sys.argv[0]))) + print("") + print("If called with multiple arguments, they will be concatenated") + print("and treated as a single fingerprint.") + print("") + print("If called with no arguments, input is read from stdin,") + print("and each line is treated as a single fingerprint. In this") + print("mode, invalid values are silently ignored.") + exit(1) + +if __name__ == '__main__': + if 1 == len(sys.argv): + fps = sys.stdin.readlines() + else: + fps = [" ".join(sys.argv[1:])] + for fp in fps: + try: + words = wordify(fp) + print("\n{0}: ".format(fp.strip())) + sys.stdout.write("\t") + for i in range(0, len(words)): + sys.stdout.write(words[i] + " ") + if (not (i+1) % 4) and not i == len(words)-1: + sys.stdout.write("\n\t") + print("") + + except Exception as e: + if len(fps) == 1: + print (e) + usage() + + print("") + diff --git a/onionr/static-data/connect-check.txt b/onionr/static-data/connect-check.txt new file mode 100644 index 00000000..009a2a9a --- /dev/null +++ b/onionr/static-data/connect-check.txt @@ -0,0 +1 @@ +https://3g2upl4pq6kufc4m.onion/robots.txt,http://expyuzz4wqqyqhjn.onion/robots.txt,https://onionr.voidnet.tech/ diff --git a/onionr/static-data/default-plugins/flow/info.json b/onionr/static-data/default-plugins/flow/info.json new file mode 100644 index 00000000..993339f1 --- /dev/null +++ b/onionr/static-data/default-plugins/flow/info.json @@ -0,0 +1,5 @@ +{ + "name" : "flow", + "version" : "1.0", + "author" : "onionr" +} diff --git 
a/onionr/static-data/default-plugins/flow/main.py b/onionr/static-data/default-plugins/flow/main.py new file mode 100644 index 00000000..b2fb1dfa --- /dev/null +++ b/onionr/static-data/default-plugins/flow/main.py @@ -0,0 +1,88 @@ +''' + Onionr - P2P Microblogging Platform & Social network + + This default plugin handles "flow" messages (global chatroom style communication) +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +# Imports some useful libraries +import logger, config, threading, time +from onionrblockapi import Block + +plugin_name = 'flow' + +class OnionrFlow: + def __init__(self): + self.myCore = pluginapi.get_core() + self.alreadyOutputed = [] + self.flowRunning = False + return + + def start(self): + message = "" + self.flowRunning = True + newThread = threading.Thread(target=self.showOutput) + newThread.start() + while self.flowRunning: + try: + message = logger.readline('\nInsert message into flow:').strip().replace('\n', '\\n').replace('\r', '\\r') + except EOFError: + pass + except KeyboardInterrupt: + self.flowRunning = False + if message == "q": + self.flowRunning = False + + if len(message) > 0: + Block(content = message, type = 'txt', core = self.myCore).save() + + logger.info("Flow is exiting, goodbye") + return + + def showOutput(self): + while self.flowRunning: + for block in Block.getBlocks(type = 'txt', core = self.myCore): + if block.getHash() in self.alreadyOutputed: + continue 
+ if not self.flowRunning: + break + logger.info('\n------------------------', prompt = False) + content = block.getContent() + # Escape new lines, remove trailing whitespace, and escape ansi sequences + content = self.myCore._utils.escapeAnsi(content.replace('\n', '\\n').replace('\r', '\\r').strip()) + logger.info(block.getDate().strftime("%m/%d %H:%M") + ' - ' + logger.colors.reset + content, prompt = False) + self.alreadyOutputed.append(block.getHash()) + try: + time.sleep(5) + except KeyboardInterrupt: + self.flowRunning = False + pass + +def on_init(api, data = None): + ''' + This event is called after Onionr is initialized, but before the command + inputted is executed. Could be called when daemon is starting or when + just the client is running. + ''' + + # Doing this makes it so that the other functions can access the api object + # by simply referencing the variable `pluginapi`. + global pluginapi + pluginapi = api + flow = OnionrFlow() + api.commands.register('flow', flow.start) + api.commands.register_help('flow', 'Open the flow messaging interface') + return diff --git a/onionr/static-data/default-plugins/gui/main.py b/onionr/static-data/default-plugins/gui/main.py deleted file mode 100644 index 07e5a76e..00000000 --- a/onionr/static-data/default-plugins/gui/main.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -''' - Onionr - P2P Microblogging Platform & Social network - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. 
If not, see . -''' - -# Imports some useful libraries -import logger, config, core -import os, sqlite3, threading -from onionrblockapi import Block - -plugin_name = 'gui' - -def send(): - global message - block = Block() - block.setType('txt') - block.setContent(message) - logger.debug('Sent message in block %s.' % block.save(sign = True)) - - -def sendMessage(): - global sendEntry - - global message - message = sendEntry.get() - - t = threading.Thread(target = send) - t.start() - - sendEntry.delete(0, len(message)) - -def update(): - global listedBlocks, listbox, runningCheckDelayCount, runningCheckDelay, root, daemonStatus - - for i in Block.getBlocks(type = 'txt'): - if i.getContent().strip() == '' or i.getHash() in listedBlocks: - continue - listbox.insert(99999, str(i.getContent())) - listedBlocks.append(i.getHash()) - listbox.see(99999) - - runningCheckDelayCount += 1 - - if runningCheckDelayCount == runningCheckDelay: - resp = pluginapi.daemon.local_command('ping') - if resp == 'pong': - daemonStatus.config(text = "Onionr Daemon Status: Running") - else: - daemonStatus.config(text = "Onionr Daemon Status: Not Running") - runningCheckDelayCount = 0 - root.after(10000, update) - - -def reallyOpenGUI(): - import tkinter - global root, runningCheckDelay, runningCheckDelayCount, scrollbar, listedBlocks, nodeInfo, keyInfo, idText, idEntry, pubKeyEntry, listbox, daemonStatus, sendEntry - - root = tkinter.Tk() - - root.title("Onionr GUI") - - runningCheckDelay = 5 - runningCheckDelayCount = 4 - - scrollbar = tkinter.Scrollbar(root) - scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y) - - listedBlocks = [] - - nodeInfo = tkinter.Frame(root) - keyInfo = tkinter.Frame(root) - - hostname = pluginapi.get_onionr().get_hostname() - logger.debug('Onionr Hostname: %s' % hostname) - idText = hostname - - idEntry = tkinter.Entry(nodeInfo) - tkinter.Label(nodeInfo, text = "Node Address: ").pack(side=tkinter.LEFT) - idEntry.pack() - idEntry.insert(0, idText.strip()) - 
idEntry.configure(state="readonly") - - nodeInfo.pack() - - pubKeyEntry = tkinter.Entry(keyInfo) - - tkinter.Label(keyInfo, text="Public key: ").pack(side=tkinter.LEFT) - - pubKeyEntry.pack() - pubKeyEntry.insert(0, pluginapi.get_core()._crypto.pubKey) - pubKeyEntry.configure(state="readonly") - - keyInfo.pack() - - sendEntry = tkinter.Entry(root) - sendBtn = tkinter.Button(root, text='Send Message', command=sendMessage) - sendEntry.pack(side=tkinter.TOP, pady=5) - sendBtn.pack(side=tkinter.TOP) - - listbox = tkinter.Listbox(root, yscrollcommand=tkinter.Scrollbar.set, height=15) - - listbox.pack(fill=tkinter.BOTH, pady=25) - - daemonStatus = tkinter.Label(root, text="Onionr Daemon Status: unknown") - daemonStatus.pack() - - scrollbar.config(command=tkinter.Listbox.yview) - root.after(2000, update) - root.mainloop() - -def openGUI(): - t = threading.Thread(target = reallyOpenGUI) - t.daemon = False - t.start() - -def on_init(api, data = None): - global pluginapi - pluginapi = api - - api.commands.register(['gui', 'launch-gui', 'open-gui'], openGUI) - api.commands.register_help('gui', 'Opens a graphical interface for Onionr') - - return diff --git a/onionr/static-data/default-plugins/gui/info.json b/onionr/static-data/default-plugins/pms/info.json similarity index 71% rename from onionr/static-data/default-plugins/gui/info.json rename to onionr/static-data/default-plugins/pms/info.json index 83d4489a..454b9bd6 100644 --- a/onionr/static-data/default-plugins/gui/info.json +++ b/onionr/static-data/default-plugins/pms/info.json @@ -1,5 +1,5 @@ { - "name" : "gui", + "name" : "pms", "version" : "1.0", "author" : "onionr" } diff --git a/onionr/static-data/default-plugins/pms/main.py b/onionr/static-data/default-plugins/pms/main.py new file mode 100644 index 00000000..27f56438 --- /dev/null +++ b/onionr/static-data/default-plugins/pms/main.py @@ -0,0 +1,200 @@ +''' + Onionr - P2P Microblogging Platform & Social network + + This default plugin handles private messages in an 
email like fashion +''' +''' + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +# Imports some useful libraries +import logger, config, threading, time, readline, datetime +from onionrblockapi import Block +import onionrexceptions +import locale +locale.setlocale(locale.LC_ALL, '') + +plugin_name = 'pms' +PLUGIN_VERSION = '0.0.1' + +def draw_border(text): + #https://stackoverflow.com/a/20757491 + lines = text.splitlines() + width = max(len(s) for s in lines) + res = ['┌' + '─' * width + '┐'] + for s in lines: + res.append('│' + (s + ' ' * width)[:width] + '│') + res.append('└' + '─' * width + '┘') + return '\n'.join(res) + + +class MailStrings: + def __init__(self, mailInstance): + self.mailInstance = mailInstance + + self.programTag = 'OnionrMail v%s' % (PLUGIN_VERSION) + choices = ['view inbox', 'view sentbox', 'send message', 'help', 'quit'] + self.mainMenuChoices = choices + self.mainMenu = '''\n +----------------- +1. %s +2. %s +3. %s +4. %s +5. 
%s''' % (choices[0], choices[1], choices[2], choices[3], choices[4]) + +class OnionrMail: + def __init__(self, pluginapi): + self.myCore = pluginapi.get_core() + #self.dataFolder = pluginapi.get_data_folder() + self.strings = MailStrings(self) + + return + + def inbox(self): + blockCount = 0 + pmBlockMap = {} + pmBlocks = {} + logger.info('Decrypting messages...') + choice = '' + + # this could use a lot of memory if someone has recieved a lot of messages + for blockHash in self.myCore.getBlocksByType('pm'): + pmBlocks[blockHash] = Block(blockHash, core=self.myCore) + pmBlocks[blockHash].decrypt() + + while choice not in ('-q', 'q', 'quit'): + blockCount = 0 + for blockHash in pmBlocks: + if not pmBlocks[blockHash].decrypted: + continue + blockCount += 1 + pmBlockMap[blockCount] = blockHash + blockDate = pmBlocks[blockHash].getDate().strftime("%m/%d %H:%M") + print('%s. %s: %s' % (blockCount, blockDate, blockHash)) + + try: + choice = logger.readline('Enter a block number, -r to refresh, or -q to stop: ').strip().lower() + except (EOFError, KeyboardInterrupt): + choice = '-q' + + if choice in ('-q', 'q', 'quit'): + continue + + if choice in ('-r', 'r', 'refresh'): + # dirty hack + self.inbox() + return + + try: + choice = int(choice) + except ValueError: + pass + else: + try: + pmBlockMap[choice] + readBlock = pmBlocks[pmBlockMap[choice]] + except KeyError: + pass + else: + cancel = '' + readBlock.verifySig() + print('Message recieved from %s' % (readBlock.signer,)) + print('Valid signature:', readBlock.validSig) + if not readBlock.validSig: + logger.warn('This message has an INVALID signature. 
ANYONE could have sent this message.') + cancel = logger.readline('Press enter to continue to message, or -q to not open the message (recommended).') + if cancel != '-q': + print(draw_border(self.myCore._utils.escapeAnsi(readBlock.bcontent.decode().strip()))) + return + + def draftMessage(self): + message = '' + newLine = '' + recip = '' + entering = True + + while entering: + try: + recip = logger.readline('Enter peer address, or q to stop:').strip() + if recip in ('-q', 'q'): + raise EOFError + if not self.myCore._utils.validatePubKey(recip): + raise onionrexceptions.InvalidPubkey('Must be a valid ed25519 base32 encoded public key') + except onionrexceptions.InvalidPubkey: + logger.warn('Invalid public key') + except (KeyboardInterrupt, EOFError): + entering = False + else: + break + else: + # if -q or ctrl-c/d, exit function here, otherwise we successfully got the public key + return + + print('Enter your message, stop by entering -q on a new line.') + while newLine != '-q': + try: + newLine = input() + except (KeyboardInterrupt, EOFError): + pass + if newLine == '-q': + continue + newLine += '\n' + message += newLine + + print('Inserting encrypted message as Onionr block....') + + self.myCore.insertBlock(message, header='pm', encryptType='asym', asymPeer=recip, sign=True) + + def menu(self): + choice = '' + while True: + + print(self.strings.programTag + '\n\nOur ID: ' + self.myCore._crypto.pubKey + self.strings.mainMenu.title()) # print out main menu + + try: + choice = logger.readline('Enter 1-%s:\n' % (len(self.strings.mainMenuChoices))).lower().strip() + except (KeyboardInterrupt, EOFError): + choice = '5' + + if choice in (self.strings.mainMenuChoices[0], '1'): + self.inbox() + elif choice in (self.strings.mainMenuChoices[1], '2'): + logger.warn('not implemented yet') + elif choice in (self.strings.mainMenuChoices[2], '3'): + self.draftMessage() + elif choice in (self.strings.mainMenuChoices[3], '4'): + logger.warn('not implemented yet') + elif choice in 
(self.strings.mainMenuChoices[4], '5'): + logger.info('Goodbye.') + break + elif choice == '': + pass + else: + logger.warn('Invalid choice.') + return + + +def on_init(api, data = None): + ''' + This event is called after Onionr is initialized, but before the command + inputted is executed. Could be called when daemon is starting or when + just the client is running. + ''' + + pluginapi = api + mail = OnionrMail(pluginapi) + api.commands.register(['mail'], mail.menu) + api.commands.register_help('mail', 'Interact with OnionrMail') + return \ No newline at end of file diff --git a/onionr/static-data/default_config.json b/onionr/static-data/default_config.json index 40958e97..5458db4a 100644 --- a/onionr/static-data/default_config.json +++ b/onionr/static-data/default_config.json @@ -3,10 +3,25 @@ "dev_mode": true, "display_header" : true, - "newCommunicator": false, + "direct_connect" : { + "respond" : true, + "execute_callbacks" : true + } + }, - "dc_response": true, - "dc_execcallbacks" : true + "www" : { + "public" : { + "run" : true + }, + + "private" : { + "run" : true + }, + + "ui" : { + "run" : true, + "private" : true + } }, "client" : { @@ -26,7 +41,7 @@ }, "tor" : { - + "v3onions": false }, "i2p":{ @@ -36,9 +51,14 @@ }, "allocations":{ - "disk": 1000000000, + "disk": 9000000000, "netTotal": 1000000000, - "blockCache" : 5000000, - "blockCacheTotal" : 50000000 + "blockCache": 5000000, + "blockCacheTotal": 50000000 + }, + "peers":{ + "minimumScore": -100, + "maxStoredPeers": 500, + "maxConnect": 5 } } diff --git a/onionr/static-data/header.txt b/onionr/static-data/header.txt index 045c8aa1..92664951 100644 --- a/onionr/static-data/header.txt +++ b/onionr/static-data/header.txt @@ -18,7 +18,7 @@ P ::: :::: ::::::: :::: :::: W:: :: :: ::: :: :: :: :: :::: ::::: P ::: ::::: :::::: :::: :::: W:: :: :: ::: :: :: :: :: ::: :: ::: P :::: ::::: ::::: ::: W :::: :: :: :: ::::: :: :: :: :: P :::: :::::: :::::: :::: -P :::: :::::::::::: :::: +P :::: :::::::::::: :::: 
GvPBV P ::::: :::::::: :::: P ::::: :::::: P :::::::::::::::: diff --git a/onionr/static-data/index.html b/onionr/static-data/index.html index f9df9eb7..93e48beb 100644 --- a/onionr/static-data/index.html +++ b/onionr/static-data/index.html @@ -1,5 +1,7 @@

This is an Onionr Node

-

The content on this server is not necessarily created or intentionally stored by the owner of the server.

+

The content on this server is not necessarily created by the server owner, and was not necessarily stored with the owner's knowledge.

+ +

Onionr is a decentralized, distributed data storage system, that anyone can insert data into.

To learn more about Onionr, see the website at https://Onionr.VoidNet.tech/

diff --git a/onionr/static-data/www/ui/README.md b/onionr/static-data/www/ui/README.md new file mode 100644 index 00000000..451b08ed --- /dev/null +++ b/onionr/static-data/www/ui/README.md @@ -0,0 +1,44 @@ +# Onionr UI + +## About + +The default GUI for Onionr + +## Setup + +To compile the application, simply execute the following: + +``` +python3 compile.py +``` + +If you are wanting to compile Onionr UI for another language, execute the following, replacing `[lang]` with the target language (supported languages include `eng` for English, `spa` para español, and `zho`为中国人): + +``` +python3 compile.py [lang] +``` + +## FAQ +### Why "compile" anyway? +This web application is compiled for a few reasons: +1. To make it easier to update; this way, we do not have to update the header in every file if we want to change something about it. +2. To make the application smaller in size; there is less duplicated code when the code like the header and footer can be stored in an individual file rather than every file. +3. For multi-language support; with the Python "tags" feature, we can reference strings by variable name, and based on a language file, they can be dynamically inserted into the page on compilation. +4. For compile-time customizations. + +### What exactly happens when you compile? +Upon compilation, files from the `src/` directory will be copied to `dist/` directory, header and footers will be injected in the proper places, and Python "tags" will be interpreted. + + +### How do Python "tags" work? +There are two types of Python "tags": +1. Logic tags (`<$ logic $>`): These tags allow you to perform logic at compile time. Example: `<$ import datetime; lastUpdate = datetime.datetime.now() $>`: This gets the current time while compiling, then stores it in `lastUpdate`. +2. Data tags (`<$= data $>`): These tags take whatever the return value of the statement in the tags is, and write it directly to the page. Example: `<$= 'This application was compiled at %s.' 
% lastUpdate $>`: This will write the message in the string in the tags to the page. + +**Note:** Logic tags take a higher priority and will always be interpreted first. + +### How does the language feature work? +When you use a data tag to write a string to the page (e.g. `<$= LANG.HELLO_WORLD $>`), the language feature simply takes dictionary of the language that is currently being used from the language map file (`lang.json`), then searches for the key (being the variable name after the characters `LANG.` in the data tag, like `HELLO_WORLD` from the example before). It then writes that string to the page. Language variables are always prefixed with `LANG.` and should always be uppercase (as they are a constant). + +### I changed a few things in the application and tried to view the updates in my browser, but nothing changed! +You most likely forgot to compile. Try running `python3 compile.py` and check again. If you are still having issues, [open up an issue](https://gitlab.com/beardog/Onionr/issues/new?issue[title]=Onionr UI not updating after compiling). \ No newline at end of file diff --git a/onionr/static-data/www/ui/common/footer.html b/onionr/static-data/www/ui/common/footer.html new file mode 100644 index 00000000..6b5cfb06 --- /dev/null +++ b/onionr/static-data/www/ui/common/footer.html @@ -0,0 +1,4 @@ + + + + diff --git a/onionr/static-data/www/ui/common/header.html b/onionr/static-data/www/ui/common/header.html new file mode 100644 index 00000000..2a2b4f56 --- /dev/null +++ b/onionr/static-data/www/ui/common/header.html @@ -0,0 +1,30 @@ +<$= LANG.ONIONR_TITLE $> + + + + + + + + + + diff --git a/onionr/static-data/www/ui/common/onionr-timeline-post.html b/onionr/static-data/www/ui/common/onionr-timeline-post.html new file mode 100644 index 00000000..68440a01 --- /dev/null +++ b/onionr/static-data/www/ui/common/onionr-timeline-post.html @@ -0,0 +1,32 @@ + +
+
+
+
+ +
+
+
+ + +
+ +
+
+ +
+ $content +
+ +
+ <$= LANG.POST_LIKE $> + <$= LANG.POST_REPLY $> +
+
+
+
+
+ diff --git a/onionr/static-data/www/ui/compile.py b/onionr/static-data/www/ui/compile.py new file mode 100755 index 00000000..2667b210 --- /dev/null +++ b/onionr/static-data/www/ui/compile.py @@ -0,0 +1,130 @@ +#!/usr/bin/python3 + +import shutil, os, re, json, traceback + +# get user's config +settings = {} +with open('config.json', 'r') as file: + settings = json.loads(file.read()) + +# "hardcoded" config, not for user to mess with +HEADER_FILE = 'common/header.html' +FOOTER_FILE = 'common/footer.html' +SRC_DIR = 'src/' +DST_DIR = 'dist/' +HEADER_STRING = '
' +FOOTER_STRING = '