Merge branch 'uploadwait' into 'master'
Wait for Upload to Share

See merge request beardog/Onionr!12
commit 9ae602f22a
@@ -105,6 +105,8 @@ class API:
         self.mimeType = 'text/plain'
         self.overrideCSP = False

+        self.hideBlocks = [] # Blocks to be denied sharing
+
         with open(self._core.dataDir + 'time-bypass.txt', 'w') as bypass:
             bypass.write(self.timeBypassToken)

@@ -236,6 +238,15 @@ class API:
             self.validateHost('private')
             if action == 'hello':
                 resp = Response('Hello, World! ' + request.host)
+            elif action == 'waitForShare':
+                if self._core._utils.validateHash(data):
+                    if data not in self.hideBlocks:
+                        self.hideBlocks.append(data)
+                    else:
+                        self.hideBlocks.remove(data)
+                    resp = "success"
+                else:
+                    resp = "failed to validate hash"
             elif action == 'shutdown':
                 # request.environ.get('werkzeug.server.shutdown')()
                 self.http_server.stop()
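
Note: a minimal sketch of how a local client could exercise the new waitForShare action over the client HTTP API. The request fields mirror what OnionrUtils.localCommand builds further down in this diff; the helper name, host, port, and token values are placeholders, and it is assumed the handler reads the block hash from the data query parameter. Calling the action a second time with the same hash removes it from hideBlocks again, since the handler toggles membership.

# Illustrative sketch only; values are placeholders, not defaults from this merge request.
import requests

def wait_for_share(block_hash, host='127.0.0.1', port=59496,
                   token='CLIENT_HMAC', timing_token='TIMING_TOKEN'):
    # Same query fields that localCommand assembles: action, token, timingToken, data.
    resp = requests.get('http://%s:%s/client/' % (host, port), params={
        'action': 'waitForShare',
        'token': token,
        'timingToken': timing_token,
        'data': block_hash,
    })
    return resp.text  # "success", or "failed to validate hash" for malformed input
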
@@ -464,7 +475,11 @@ class API:
             elif action == 'getDBHash':
                 resp = Response(self._utils.getBlockDBHash())
             elif action == 'getBlockHashes':
-                resp = Response('\n'.join(self._core.getBlockList()))
+                bList = self._core.getBlockList()
+                for b in self.hideBlocks:
+                    if b in bList:
+                        bList.remove(b)
+                resp = Response('\n'.join(bList))
             # setData should be something the communicator initiates, not this api
             elif action == 'getData':
                 resp = ''
@@ -41,7 +41,7 @@ class OnionrCommunicatorDaemon:
         self.nistSaltTimestamp = 0
         self.powSalt = 0

-        self.blockToUpload = ''
+        self.blocksToUpload = []

         # loop time.sleep delay in seconds
         self.delay = 1
@@ -96,6 +96,7 @@ class OnionrCommunicatorDaemon:
         OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
         OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.daemonTools.cooldownPeer, 30, requiresPeer=True)
+        OnionrCommunicatorTimers(self, self.uploadBlock, 10, requiresPeer=True, maxThreads=1)
         netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)
         announceTimer = OnionrCommunicatorTimers(self, self.daemonTools.announceNode, 305, requiresPeer=True, maxThreads=1)
         cleanupTimer = OnionrCommunicatorTimers(self, self.peerCleanup, 300, requiresPeer=True)
@@ -167,7 +168,7 @@ class OnionrCommunicatorDaemon:
             else:
                 continue
             newDBHash = self.peerAction(peer, 'getDBHash') # get their db hash
-            if newDBHash == False:
+            if newDBHash == False or not self._core._utils.validateHash(newDBHash):
                 continue # if request failed, restart loop (peer is added to offline peers automatically)
             triedPeers.append(peer)
             if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
@@ -466,8 +467,7 @@ class OnionrCommunicatorDaemon:
                 if i.timerFunction.__name__ == 'lookupAdders':
                     i.count = (i.frequency - 1)
         elif cmd[0] == 'uploadBlock':
-            self.blockToUpload = cmd[1]
-            threading.Thread(target=self.uploadBlock).start()
+            self.blocksToUpload.append(cmd[1])
         elif cmd[0] == 'startSocket':
             # Create our own socket server
             socketInfo = json.loads(cmd[1])
@@ -488,23 +488,31 @@
         '''Upload our block to a few peers'''
         # when inserting a block, we try to upload it to a few peers to add some deniability
         triedPeers = []
-        if not self._core._utils.validateHash(self.blockToUpload):
-            logger.warn('Requested to upload invalid block')
-            return
-        for i in range(max(len(self.onlinePeers), 2)):
-            peer = self.pickOnlinePeer()
-            if peer in triedPeers:
-                continue
-            triedPeers.append(peer)
-            url = 'http://' + peer + '/public/upload/'
-            data = {'block': block.Block(self.blockToUpload).getRaw()}
-            proxyType = ''
-            if peer.endswith('.onion'):
-                proxyType = 'tor'
-            elif peer.endswith('.i2p'):
-                proxyType = 'i2p'
-            logger.info("Uploading block")
-            self._core._utils.doPostRequest(url, data=data, proxyType=proxyType)
+        finishedUploads = []
+        if len(self.blocksToUpload) != 0:
+            for bl in self.blocksToUpload:
+                if not self._core._utils.validateHash(bl):
+                    logger.warn('Requested to upload invalid block')
+                    return
+                for i in range(max(len(self.onlinePeers), 2)):
+                    peer = self.pickOnlinePeer()
+                    if peer in triedPeers:
+                        continue
+                    triedPeers.append(peer)
+                    url = 'http://' + peer + '/public/upload/'
+                    data = {'block': block.Block(bl).getRaw()}
+                    proxyType = ''
+                    if peer.endswith('.onion'):
+                        proxyType = 'tor'
+                    elif peer.endswith('.i2p'):
+                        proxyType = 'i2p'
+                    logger.info("Uploading block")
+                    if not self._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
+                        self._core._utils.localCommand('waitForShare', data=bl)
+                        finishedUploads.append(bl)
+        for x in finishedUploads:
+            self.blocksToUpload.remove(x)
+        self.decrementThreadCount('uploadBlock')

     def announce(self, peer):
         '''Announce to peers our address'''
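
Note on the finishedUploads list: uploaded hashes are collected during the loop and only removed from blocksToUpload afterwards, presumably because removing items from a Python list while iterating over it skips elements. A standalone sketch (not Onionr code) of the difference:

# Removing during iteration skips items.
queue = ['a', 'b', 'c']
for item in queue:
    queue.remove(item)
print(queue)  # ['b'] is left behind

# Pattern used in uploadBlock: remember what finished, remove after the loop.
queue = ['a', 'b', 'c']
finished = []
for item in queue:
    finished.append(item)  # stands in for a successful upload
for item in finished:
    queue.remove(item)
print(queue)  # []
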
@@ -728,6 +728,8 @@ class Core:
             payload = proof.waitForResult()
             if payload != False:
                 retData = self.setData(payload)
+                # Tell the api server through localCommand to wait for the daemon to upload this block to make statistical analysis more difficult
+                self._utils.localCommand('waitForShare', data=retData)
                 self.addToBlockDB(retData, selfInsert=True, dataSaved=True)
                 #self.setBlockType(retData, meta['type'])
                 self._utils.processBlockMetadata(retData)
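
Taken together with the API change above, insertBlock now hides a freshly created block from getBlockHashes until the communicator has pushed it to a few peers, so the block does not first appear only on its originating node. A toy model (not Onionr code) of the toggle across that round trip; the hash is a placeholder, since real values must pass validateHash:

hide_blocks = []

def wait_for_share(block_hash):
    # Same toggle as the waitForShare handler in the API hunk above.
    if block_hash not in hide_blocks:
        hide_blocks.append(block_hash)
    else:
        hide_blocks.remove(block_hash)

wait_for_share('exampleblockhash')  # Core.insertBlock hides the new block
print(hide_blocks)                  # ['exampleblockhash'] -- withheld from getBlockHashes
wait_for_share('exampleblockhash')  # uploadBlock calls again after a successful POST
print(hide_blocks)                  # [] -- block is advertised normally again
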
@@ -40,7 +40,7 @@ except ImportError:
     raise Exception("You need the PySocks module (for use with socks5 proxy to use Tor)")

 ONIONR_TAGLINE = 'Anonymous P2P Platform - GPLv3 - https://Onionr.VoidNet.Tech'
-ONIONR_VERSION = '0.3.0' # for debugging and stuff
+ONIONR_VERSION = '0.3.1' # for debugging and stuff
 ONIONR_VERSION_TUPLE = tuple(ONIONR_VERSION.split('.')) # (MAJOR, MINOR, VERSION)
 API_VERSION = '5' # increments of 1; only change when something fundamental about how the API works changes. This way other nodes know how to communicate without learning too much information about you.

@@ -18,7 +18,7 @@
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 # Misc functions that do not fit in the main api, but are useful
-import getpass, sys, requests, os, socket, hashlib, logger, sqlite3, config, binascii, time, base64, json, glob, shutil, math, json, re
+import getpass, sys, requests, os, socket, hashlib, logger, sqlite3, config, binascii, time, base64, json, glob, shutil, math, json, re, urllib.parse
 import nacl.signing, nacl.encoding
 from onionrblockapi import Block
 import onionrexceptions
@@ -150,7 +150,7 @@ class OnionrUtils:
             logger.error('Failed to read my address.', error = error)
             return None

-    def localCommand(self, command, silent = True):
+    def localCommand(self, command, data='', silent = True):
         '''
             Send a command to the local http API server, securely. Intended for local clients, DO NOT USE for remote peers.
         '''
@@ -164,6 +164,8 @@ class OnionrUtils:
         except FileNotFoundError:
             return False
         payload = 'http://%s:%s/client/?action=%s&token=%s&timingToken=%s' % (hostname, config.get('client.port'), command, config.get('client.hmac'), self.timingToken)
+        if data != '':
+            payload += '&data=' + urllib.parse.quote_plus(data)
         try:
            retData = requests.get(payload).text
         except Exception as error:
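
Finally, a quick check (with assumed placeholder host, port, and token values) of how the optional data argument is appended: urllib.parse.quote_plus percent-escapes the value so it is safe to embed in the query string.

import urllib.parse

payload = 'http://127.0.0.1:59496/client/?action=waitForShare&token=TOKEN&timingToken=TT'
data = 'value needing escaping: a+b/c'
if data != '':
    payload += '&data=' + urllib.parse.quote_plus(data)
print(payload)
# ...&data=value+needing+escaping%3A+a%2Bb%2Fc
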