diff --git a/.gitignore b/.gitignore
index edea2eb4b..7a246f14c 100755
--- a/.gitignore
+++ b/.gitignore
@@ -30,8 +30,13 @@ osxbuild/env
/CompilerArgs.nsi
/build/bdist.win32/winexe/temp
-cppForSwig
/*.dll
/qrc_img_resources.py
/.project
/.pydevproject
+/.settings
+/*.wallet
+
+/findpass.py
+/*.txt
+/sandbox.py
diff --git a/ArmoryDB.py b/ArmoryDB.py
index bdb2b3514..9f85b784b 100644
--- a/ArmoryDB.py
+++ b/ArmoryDB.py
@@ -15,14 +15,17 @@
#
#######################################################################################################
-import leveldb
-from armoryengine import *
-import struct
import os
+import struct
+
+from armoryengine.ArmoryUtils import ARMORY_HOME_DIR, unpackVarInt
+from armoryengine.Block import PyBlockHeader
+from armoryengine.Transaction import PyTx
+import leveldb
+
#dbheaders_path = '/home/goat/.armory/databases/leveldb_headers'
#dbblkdata_path = '/home/goat/.armory/databases/leveldb_blkdata'
-
dbheaders_path = os.path.join(ARMORY_HOME_DIR, 'databases', 'leveldb_headers')
dbblkdata_path = os.path.join(ARMORY_HOME_DIR, 'databases', 'leveldb_blkdata')
@@ -245,7 +248,7 @@ def blkdataDBHasBlockHash(self, Hash):
if(len(val)==84):
key = '\x03' + val[80:84]
try:
- val = dArmoryDB.bblkdata.Get(key)
+ val = ArmoryDB.bblkdata.Get(key)
return True
except:
return False
diff --git a/ArmoryQt.py b/ArmoryQt.py
index 7e59bc275..4ec017682 100644
--- a/ArmoryQt.py
+++ b/ArmoryQt.py
@@ -1,47 +1,64 @@
#! /usr/bin/python
################################################################################
# #
-# Copyright (C) 2011-2013, Armory Technologies, Inc. #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
+from datetime import datetime
import hashlib
-import random
-import time
-import os
-import sys
-import shutil
+import logging
import math
-import threading
+import os
import platform
-import traceback
+import random
+import shutil
+import signal
import socket
import subprocess
-import psutil
-import signal
+import sys
+import threading
+import time
+import traceback
import webbrowser
-from datetime import datetime
+import psutil
+from copy import deepcopy
-# PyQt4 Imports
from PyQt4.QtCore import *
from PyQt4.QtGui import *
+from twisted.internet.defer import Deferred
+from twisted.internet.protocol import Protocol, ClientFactory
-# Over 20,000 lines of python to help us out
-from armoryengine import *
-from armorymodels import *
-from qtdialogs import *
-from qtdefines import *
+from armoryengine.ALL import *
from armorycolors import Colors, htmlColor, QAPP
-
+from armorymodels import *
+from ui.toolsDialogs import MessageSigningVerificationDialog
import qrc_img_resources
+from qtdefines import *
+from qtdialogs import *
+from ui.Wizards import WalletWizard, TxWizard
+from ui.VerifyOfflinePackage import VerifyOfflinePackageDialog
+from ui.UpgradeDownloader import UpgradeDownloaderDialog
+from jasvet import verifySignature, readSigBlock
+from announcefetch import AnnounceDataFetcher, ANNOUNCE_URL, ANNOUNCE_URL_BACKUP
+from armoryengine.parseAnnounce import *
+from armoryengine.PyBtcWalletRecovery import WalletConsistencyCheck
+
+# HACK ALERT: Qt has a bug in OS X where the system font settings will override
+# the app's settings when a window is activated (e.g., Armory starts, the user
+# switches to another app, and then switches back to Armory). There is a
+# workaround, as used by TeXstudio and other programs.
+# https://bugreports.qt-project.org/browse/QTBUG-5469 - Bug discussion.
+# http://sourceforge.net/p/texstudio/bugs/594/?page=1 - Fix is mentioned.
+# http://pyqt.sourceforge.net/Docs/PyQt4/qapplication.html#setDesktopSettingsAware
+# - Mentions that this must be called before the app (QAPP) is created.
+if OS_MACOSX:
+ QApplication.setDesktopSettingsAware(False)
+# PyQt4 Imports
# All the twisted/networking functionality
-from twisted.internet.protocol import Protocol, ClientFactory
-from twisted.internet.defer import Deferred
-from dialogs.toolsDialogs import MessageSigningVerificationDialog
-
if OS_WINDOWS:
from _winreg import *
@@ -50,12 +67,10 @@ class ArmoryMainWindow(QMainWindow):
""" The primary Armory window """
#############################################################################
+ @TimeThisFunction
def __init__(self, parent=None):
super(ArmoryMainWindow, self).__init__(parent)
- TimerStart('MainWindowInit')
-
- self.bornOnTime = RightNow()
# Load the settings file
self.settingsPath = CLI_OPTIONS.settingsPath
@@ -88,9 +103,6 @@ def __init__(self, parent=None):
self.newZeroConfSinceLastUpdate = []
self.lastBDMState = ['Uninitialized', None]
self.lastSDMState = 'Uninitialized'
- self.detectNotSyncQ = [0,0,0,0,0]
- self.noSyncWarnYet = True
- self.doHardReset = False
self.doShutdown = False
self.downloadDict = {}
self.notAvailErrorCount = 0
@@ -102,7 +114,45 @@ def __init__(self, parent=None):
self.satoshiExeSearchPath = None
self.initSyncCircBuff = []
self.latestVer = {}
-
+ self.lastVersionsTxtHash = ''
+ self.dlgCptWlt = None
+ self.torrentFinished = False
+ self.torrentCircBuffer = []
+ self.lastAskedUserStopTorrent = 0
+ self.wasSynchronizing = False
+ self.announceIsSetup = False
+ self.entropyAccum = []
+
+ # Full list of notifications, and notify IDs that should trigger popups
+ # when sending or receiving.
+ self.lastAnnounceUpdate = {}
+ self.changelog = []
+ self.downloadLinks = {}
+ self.almostFullNotificationList = {}
+ self.notifyOnSend = set()
+ self.notifyonRecv = set()
+ self.versionNotification = {}
+ self.notifyIgnoreLong = []
+ self.notifyIgnoreShort = []
+ self.maxPriorityID = None
+ self.satoshiVersions = ['',''] # [curr, avail]
+ self.armoryVersions = [getVersionString(BTCARMORY_VERSION), '']
+ self.NetworkingFactory = None
+
+
+ # Kick off announcement checking, unless they explicitly disabled it
+ # The fetch happens in the background, we check the results periodically
+ self.announceFetcher = None
+ self.setupAnnouncementFetcher()
+
+ #delayed URI parsing dict
+ self.delayedURIData = {}
+ self.delayedURIData['qLen'] = 0
+
+ #Setup the signal to spawn progress dialogs from the main thread
+ self.connect(self, SIGNAL('initTrigger') , self.initTrigger)
+ self.connect(self, SIGNAL('execTrigger'), self.execTrigger)
+ self.connect(self, SIGNAL('checkForNegImports'), self.checkForNegImports)
# We want to determine whether the user just upgraded to a new version
self.firstLoadNewVersion = False
@@ -110,17 +160,18 @@ def __init__(self, parent=None):
if self.settings.hasSetting('LastVersionLoad'):
lastVerStr = self.settings.get('LastVersionLoad')
if not lastVerStr==currVerStr:
+ LOGINFO('First load of new version: %s', currVerStr)
self.firstLoadNewVersion = True
self.settings.set('LastVersionLoad', currVerStr)
- # Because dynamically retrieving addresses for querying transaction
+ # Because dynamically retrieving addresses for querying transaction
# comments can be so slow, I use this txAddrMap to cache the mappings
- # between tx's and addresses relevant to our wallets. It really only
- # matters for massive tx with hundreds of outputs -- but such tx do
+ # between tx's and addresses relevant to our wallets. It really only
+ # matters for massive tx with hundreds of outputs -- but such tx do
# exist and this is needed to accommodate wallets with lots of them.
self.txAddrMap = {}
-
+
self.loadWalletsAndSettings()
eulaAgreed = self.getSettingOrSetDefault('Agreed_to_EULA', False)
@@ -139,12 +190,12 @@ def __init__(self, parent=None):
# We need to query this once at the beginning, to avoid having
# strange behavior if the user changes the setting but hasn't
# restarted yet...
- self.doManageSatoshi = \
+ self.doAutoBitcoind = \
self.getSettingOrSetDefault('ManageSatoshi', not OS_MACOSX)
# If we're going into online mode, start loading blockchain
- if self.doManageSatoshi:
+ if self.doAutoBitcoind:
self.startBitcoindIfNecessary()
else:
self.loadBlockchainIfNecessary()
@@ -156,10 +207,23 @@ def __init__(self, parent=None):
self.extraHeartbeatSpecial = []
self.extraHeartbeatOnline = []
+
+ """
+ pass a function to extraHeartbeatAlways to run on every heartbeat.
+ pass a list for more control on the function, as
+ [func, [args], keep_running],
+ where:
+ func is the function
+ [args] is a list of arguments
+ keep_running is a bool, pass False to remove the function from
+ extraHeartbeatAlways on the next iteration
+ """
+
self.extraHeartbeatAlways = []
- self.lblArmoryStatus = QRichLabel('<font color=%s>Offline</font> ' %
+ self.lblArmoryStatus = QRichLabel('<font color=%s>Offline</font> ' %
htmlColor('TextWarn'), doWrap=False)
+
self.statusBar().insertPermanentWidget(0, self.lblArmoryStatus)
# Keep a persistent printer object for paper backups
@@ -175,7 +239,7 @@ def __init__(self, parent=None):
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
-
+
self.walletsView.setModel(self.walletModel)
self.walletsView.setSelectionBehavior(QTableView.SelectRows)
self.walletsView.setSelectionMode(QTableView.SingleSelection)
@@ -192,7 +256,7 @@ def __init__(self, parent=None):
self.connect(self.walletsView, SIGNAL('doubleClicked(QModelIndex)'), \
self.execDlgWalletDetails)
-
+
w,h = tightSizeNChar(GETFONT('var'), 100)
@@ -209,12 +273,7 @@ def __init__(self, parent=None):
self.ledgerTable = []
self.ledgerModel = LedgerDispModelSimple(self.ledgerTable, self, self)
- #self.ledgerProxy = LedgerDispSortProxy()
- #self.ledgerProxy.setSourceModel(self.ledgerModel)
- #self.ledgerProxy.setDynamicSortFilter(False)
-
self.ledgerView = QTableView()
-
self.ledgerView.setModel(self.ledgerModel)
self.ledgerView.setSortingEnabled(True)
self.ledgerView.setItemDelegate(LedgerDispDelegate(self))
@@ -240,7 +299,7 @@ def __init__(self, parent=None):
cWidth = 20 # num-confirm icon width
tWidth = 72 # date icon width
initialColResize(self.ledgerView, [cWidth, 0, dateWidth, tWidth, 0.30, 0.40, 0.3])
-
+
self.connect(self.ledgerView, SIGNAL('doubleClicked(QModelIndex)'), \
self.dblClickLedger)
@@ -249,12 +308,12 @@ def __init__(self, parent=None):
btnAddWallet = QPushButton("Create Wallet")
btnImportWlt = QPushButton("Import or Restore Wallet")
- self.connect(btnAddWallet, SIGNAL('clicked()'), self.createNewWallet)
+ self.connect(btnAddWallet, SIGNAL('clicked()'), self.startWalletWizard)
self.connect(btnImportWlt, SIGNAL('clicked()'), self.execImportWallet)
# Put the Wallet info into it's own little box
lblAvail = QLabel("Available Wallets:")
- viewHeader = makeLayoutFrame('Horiz', [lblAvail, \
+ viewHeader = makeLayoutFrame(HORIZONTAL, [lblAvail, \
'Stretch', \
btnAddWallet, \
btnImportWlt, ])
@@ -272,11 +331,7 @@ def __init__(self, parent=None):
# Put the labels into scroll areas just in case window size is small.
self.tabDashboard = QWidget()
-
-
-
- self.SetupDashboard()
-
+ self.setupDashboard()
# Combo box to filter ledger display
@@ -288,17 +343,17 @@ def __init__(self, parent=None):
# Create the new ledger twice: can't update the ledger up/down
- # widgets until we know how many ledger entries there are from
+ # widgets until we know how many ledger entries there are from
# the first call
def createLedg():
- self.createCombinedLedger()
+ self.createCombinedLedger()
if self.frmLedgUpDown.isVisible():
- self.changeNumShow()
+ self.changeNumShow()
self.connect(self.comboWltSelect, SIGNAL('activated(int)'), createLedg)
- self.lblTot = QRichLabel('Maximum Funds:', doWrap=False);
- self.lblSpd = QRichLabel('Spendable Funds:', doWrap=False);
- self.lblUcn = QRichLabel('Unconfirmed:', doWrap=False);
+ self.lblTot = QRichLabel('Maximum Funds:', doWrap=False);
+ self.lblSpd = QRichLabel('Spendable Funds:', doWrap=False);
+ self.lblUcn = QRichLabel('Unconfirmed:', doWrap=False);
self.lblTotalFunds = QRichLabel('-'*12, doWrap=False)
self.lblSpendFunds = QRichLabel('-'*12, doWrap=False)
@@ -385,7 +440,7 @@ def createLedg():
layoutUpDown.setVerticalSpacing(2)
self.frmLedgUpDown.setLayout(layoutUpDown)
self.frmLedgUpDown.setFrameStyle(STYLE_SUNKEN)
-
+
frmLower = makeHorizFrame([ frmFilter, \
'Stretch', \
@@ -397,7 +452,6 @@ def createLedg():
ledgFrame = QFrame()
ledgFrame.setFrameStyle(QFrame.Box|QFrame.Sunken)
ledgLayout = QGridLayout()
- #ledgLayout.addWidget(QLabel("Ledger:"), 0,0)
ledgLayout.addWidget(self.ledgerView, 1,0)
ledgLayout.addWidget(frmLower, 2,0)
ledgLayout.setRowStretch(0, 0)
@@ -408,25 +462,29 @@ def createLedg():
self.tabActivity = QWidget()
self.tabActivity.setLayout(ledgLayout)
+ self.tabAnnounce = QWidget()
+ self.setupAnnounceTab()
+
+
# Add the available tabs to the main tab widget
- self.MAINTABS = enum('Dashboard','Transactions')
+ self.MAINTABS = enum('Dash','Ledger','Announce')
self.mainDisplayTabs.addTab(self.tabDashboard, 'Dashboard')
self.mainDisplayTabs.addTab(self.tabActivity, 'Transactions')
+ self.mainDisplayTabs.addTab(self.tabAnnounce, 'Announcements')
btnSendBtc = QPushButton("Send Bitcoins")
btnRecvBtc = QPushButton("Receive Bitcoins")
btnWltProps = QPushButton("Wallet Properties")
btnOfflineTx = QPushButton("Offline Transactions")
-
self.connect(btnWltProps, SIGNAL('clicked()'), self.execDlgWalletDetails)
self.connect(btnRecvBtc, SIGNAL('clicked()'), self.clickReceiveCoins)
self.connect(btnSendBtc, SIGNAL('clicked()'), self.clickSendBitcoins)
self.connect(btnOfflineTx,SIGNAL('clicked()'), self.execOfflineTx)
- verStr = 'Armory %s-beta / %s' % (getVersionString(BTCARMORY_VERSION), \
+ verStr = 'Armory %s - %s User' % (getVersionString(BTCARMORY_VERSION), \
UserModeStr(self.usermode))
lblInfo = QRichLabel(verStr, doWrap=False)
lblInfo.setFont(GETFONT('var',10))
@@ -447,7 +505,7 @@ def createLedg():
btnFrame.sizeHint = lambda: QSize(logoWidth*1.0, 10)
btnFrame.setMaximumWidth(logoWidth*1.2)
btnFrame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
-
+
layout = QGridLayout()
layout.addWidget(btnFrame, 0, 0, 1, 1)
layout.addWidget(wltFrame, 0, 1, 1, 1)
@@ -462,11 +520,7 @@ def createLedg():
self.setMinimumSize(750,500)
# Start the user at the dashboard
- self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dashboard)
-
- from twisted.internet import reactor
- # Show the appropriate information on the dashboard
- self.setDashboardDetails(INIT=True)
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
##########################################################################
@@ -484,7 +538,7 @@ def createLedg():
self.menusList.append( self.menu.addMenu('&Help') )
#self.menusList.append( self.menu.addMenu('&Network') )
-
+
def exportTx():
if not TheBDM.getBDMState()=='BlockchainReady':
QMessageBox.warning(self, 'Transactions Unavailable', \
@@ -494,7 +548,7 @@ def exportTx():
return
else:
DlgExportTxHistory(self,self).exec_()
-
+
actExportTx = self.createAction('&Export Transactions', exportTx)
actSettings = self.createAction('&Settings', self.openSettings)
@@ -507,12 +561,12 @@ def exportTx():
self.menusList[MENUS.File].addAction(actExportLog)
self.menusList[MENUS.File].addAction(actCloseApp)
-
- def chngStd(b):
+
+ def chngStd(b):
if b: self.setUserMode(USERMODE.Standard)
- def chngAdv(b):
+ def chngAdv(b):
if b: self.setUserMode(USERMODE.Advanced)
- def chngDev(b):
+ def chngDev(b):
if b: self.setUserMode(USERMODE.Expert)
modeActGrp = QActionGroup(self)
@@ -533,13 +587,13 @@ def chngDev(b):
LOGINFO('Usermode: %s', currmode)
self.firstModeSwitch=True
if currmode=='Standard':
- self.usermode = USERMODE.Standard
+ self.usermode = USERMODE.Standard
actSetModeStd.setChecked(True)
elif currmode=='Advanced':
- self.usermode = USERMODE.Advanced
+ self.usermode = USERMODE.Advanced
actSetModeAdv.setChecked(True)
elif currmode=='Expert':
- self.usermode = USERMODE.Expert
+ self.usermode = USERMODE.Expert
actSetModeDev.setChecked(True)
def openMsgSigning():
@@ -564,41 +618,58 @@ def openMsgSigning():
self.menusList[MENUS.Addresses].addAction(actImportKey)
self.menusList[MENUS.Addresses].addAction(actSweepKey)
- actCreateNew = self.createAction('&Create New Wallet', self.createNewWallet)
+ actCreateNew = self.createAction('&Create New Wallet', self.startWalletWizard)
actImportWlt = self.createAction('&Import or Restore Wallet', self.execImportWallet)
actAddressBook = self.createAction('View &Address Book', self.execAddressBook)
+ actRecoverWlt = self.createAction('Fix Damaged Wallet', self.RecoverWallet)
#actRescanOnly = self.createAction('Rescan Blockchain', self.forceRescanDB)
#actRebuildAll = self.createAction('Rescan with Database Rebuild', self.forceRebuildAndRescan)
self.menusList[MENUS.Wallets].addAction(actCreateNew)
self.menusList[MENUS.Wallets].addAction(actImportWlt)
self.menusList[MENUS.Wallets].addSeparator()
+ self.menusList[MENUS.Wallets].addAction(actRecoverWlt)
#self.menusList[MENUS.Wallets].addAction(actRescanOnly)
#self.menusList[MENUS.Wallets].addAction(actRebuildAll)
#self.menusList[MENUS.Wallets].addAction(actMigrateSatoshi)
#self.menusList[MENUS.Wallets].addAction(actAddressBook)
+ def execVersion():
+ self.explicitCheckAnnouncements()
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Announce)
execAbout = lambda: DlgHelpAbout(self).exec_()
- execVersion = lambda: self.checkForLatestVersion(wasRequested=True)
execTrouble = lambda: webbrowser.open('https://bitcoinarmory.com/troubleshooting/')
- actAboutWindow = self.createAction('About Armory', execAbout)
- actTroubleshoot = self.createAction('Troubleshooting Armory', execTrouble)
- actVersionCheck = self.createAction('Armory Version...', execVersion)
- actFactoryReset = self.createAction('Revert All Settings', self.factoryReset)
- actClearMemPool = self.createAction('Clear All Unconfirmed', self.clearMemoryPool)
- actRescanDB = self.createAction('Rescan Databases', self.rescanNextLoad)
- actRebuildDB = self.createAction('Rebuild and Rescan Databases', self.rebuildNextLoad)
+ execBugReport = lambda: DlgBugReport(self, self).exec_()
+
+
+ execVerifySigned = lambda: VerifyOfflinePackageDialog(self, self).exec_()
+ actAboutWindow = self.createAction(tr('About Armory'), execAbout)
+ actVersionCheck = self.createAction(tr('Armory Version...'), execVersion)
+ actDownloadUpgrade = self.createAction(tr('Update Software...'), self.openDownloaderAll)
+ actVerifySigned = self.createAction(tr('Verify Signed Package...'), execVerifySigned)
+ actTroubleshoot = self.createAction(tr('Troubleshooting Armory'), execTrouble)
+ actSubmitBug = self.createAction(tr('Submit Bug Report'), execBugReport)
+ actClearMemPool = self.createAction(tr('Clear All Unconfirmed'), self.clearMemoryPool)
+ actRescanDB = self.createAction(tr('Rescan Databases'), self.rescanNextLoad)
+ actRebuildDB = self.createAction(tr('Rebuild and Rescan Databases'), self.rebuildNextLoad)
+ actFactoryReset = self.createAction(tr('Factory Reset'), self.factoryReset)
+ actPrivacyPolicy = self.createAction(tr('Armory Privacy Policy'), self.showPrivacyGeneric)
self.menusList[MENUS.Help].addAction(actAboutWindow)
- self.menusList[MENUS.Help].addAction(actTroubleshoot)
self.menusList[MENUS.Help].addAction(actVersionCheck)
- self.menusList[MENUS.Help].addAction(actFactoryReset)
+ self.menusList[MENUS.Help].addAction(actDownloadUpgrade)
+ self.menusList[MENUS.Help].addAction(actVerifySigned)
+ self.menusList[MENUS.Help].addSeparator()
+ self.menusList[MENUS.Help].addAction(actTroubleshoot)
+ self.menusList[MENUS.Help].addAction(actSubmitBug)
+ self.menusList[MENUS.Help].addAction(actPrivacyPolicy)
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addAction(actClearMemPool)
self.menusList[MENUS.Help].addAction(actRescanDB)
self.menusList[MENUS.Help].addAction(actRebuildDB)
+ self.menusList[MENUS.Help].addAction(actFactoryReset)
# Restore any main-window geometry saved in the settings file
hexgeom = self.settings.get('MainGeometry')
@@ -614,22 +685,40 @@ def openMsgSigning():
self.ledgerView.setColumnWidth(LEDGERCOLS.NumConf, 20)
self.ledgerView.setColumnWidth(LEDGERCOLS.TxDir, 72)
+ haveGUI[0] = True
+ haveGUI[1] = self
+ BDMcurrentBlock[1] = 1
- TimerStop('MainWindowInit')
+ if DO_WALLET_CHECK:
+ self.checkWallets()
+ self.setDashboardDetails()
+
+ from twisted.internet import reactor
reactor.callLater(0.1, self.execIntroDialog)
reactor.callLater(1, self.Heartbeat)
+ if self.getSettingOrSetDefault('MinimizeOnOpen', False) and not CLI_ARGS:
+ LOGINFO('MinimizeOnOpen is True')
+ reactor.callLater(0, self.minimizeArmory)
+
+
if CLI_ARGS:
reactor.callLater(1, self.uriLinkClicked, CLI_ARGS[0])
- elif not self.firstLoad:
- # Don't need to bother the user on the first load with updating
- reactor.callLater(0.2, self.checkForLatestVersion)
+ ####################################################
+ def getWatchingOnlyWallets(self):
+ result = []
+ for wltID in self.walletIDList:
+ if self.walletMap[wltID].watchingOnly:
+ result.append(wltID)
+ return result
+
####################################################
def factoryReset(self):
- reply = QMessageBox.information(self,'Revert all Settings?', \
+ """
+ reply = QMessageBox.information(self,'Factory Reset', \
'You are about to revert all Armory settings '
'to the state they were in when Armory was first installed. '
'
'
@@ -640,57 +729,192 @@ def factoryReset(self):
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- self.doHardReset = True
+ self.removeSettingsOnClose = True
self.closeForReal()
-
+ """
+
+ if DlgFactoryReset(self,self).exec_():
+ # The dialog already wrote all the flag files, just close now
+ self.closeForReal()
+
+
+ ####################################################
+ def showPrivacyGeneric(self):
+ DlgPrivacyPolicy().exec_()
+
####################################################
def clearMemoryPool(self):
- touchFile( os.path.join(ARMORY_HOME_DIR, 'clearmempool.txt') )
+ touchFile( os.path.join(ARMORY_HOME_DIR, 'clearmempool.flag') )
msg = tr("""
The next time you restart Armory, all unconfirmed transactions will
be cleared allowing you to retry any stuck transactions.""")
- if not self.getSettingOrSetDefault('ManageSatoshi', True):
+ if not self.doAutoBitcoind:
msg += tr("""
-
Make sure you also restart Bitcoin-Qt
- (or bitcoind) and let it synchronize again before you restart
+
Make sure you also restart Bitcoin-Qt
+ (or bitcoind) and let it synchronize again before you restart
Armory. Doing so will clear its memory pool, as well""")
QMessageBox.information(self, tr('Memory Pool'), msg, QMessageBox.Ok)
+
+
+ ####################################################
+ def registerWidgetActivateTime(self, widget):
+ # This is a bit of a hack, but it's a very isolated method to make
+ # it easy to link widgets to my entropy accumulator
+
+ # I just realized this doesn't do exactly what I originally intended...
+ # I wanted it to work on arbitrary widgets like QLineEdits, but using
+ # super is not the answer. What I want is the original class method
+ # to be called after logging keypress, not its superclass method.
+ # Nonetheless, it does do what I need it to, as long as you only
+ # registered frames and dialogs, not individual widgets/controls.
+ mainWindow = self
+
+ def newKPE(wself, event=None):
+ mainWindow.logEntropy()
+ super(wself.__class__, wself).keyPressEvent(event)
+
+ def newKRE(wself, event=None):
+ mainWindow.logEntropy()
+ super(wself.__class__, wself).keyReleaseEvent(event)
+
+ def newMPE(wself, event=None):
+ mainWindow.logEntropy()
+ super(wself.__class__, wself).mousePressEvent(event)
+
+ def newMRE(wself, event=None):
+ mainWindow.logEntropy()
+ super(wself.__class__, wself).mouseReleaseEvent(event)
+
+ from types import MethodType
+ widget.keyPressEvent = MethodType(newKPE, widget)
+ widget.keyReleaseEvent = MethodType(newKRE, widget)
+ widget.mousePressEvent = MethodType(newMPE, widget)
+ widget.mouseReleaseEvent = MethodType(newMRE, widget)
+
+
+ ####################################################
+ def logEntropy(self):
+ try:
+ self.entropyAccum.append(RightNow())
+ self.entropyAccum.append(QCursor.pos().x())
+ self.entropyAccum.append(QCursor.pos().y())
+ except:
+ LOGEXCEPT('Error logging keypress entropy')
+
+ ####################################################
+ def getExtraEntropyForKeyGen(self):
+ # The entropyAccum var has all the timestamps, down to the microsecond,
+ # of every keypress and mouseclick made during the wallet creation
+ # wizard. Also logs mouse positions on every press, though it will
+ # be constant while typing. Either way, even, if they change no text
+ # and use a 5-char password, we will still pickup about 40 events.
+ # Then we throw in the [name,time,size] triplets of some volatile
+ # system directories, and the hash of a file in that directory that
+ # is expected to have timestamps and system-dependent parameters.
+ # Finally, take a desktop screenshot...
+ # All three of these source are likely to have sufficient entropy alone.
+ source1,self.entropyAccum = self.entropyAccum,[]
+
+ if len(source1)==0:
+ LOGERROR('Error getting extra entropy from mouse & key presses')
+
+ source2 = []
+
+ try:
+ if OS_WINDOWS:
+ tempDir = os.getenv('TEMP')
+ extraFiles = []
+ elif OS_LINUX:
+ tempDir = '/var/log'
+ extraFiles = ['/var/log/Xorg.0.log']
+ elif OS_MACOSX:
+ tempDir = '/var/log'
+ extraFiles = ['/var/log/system.log']
+
+ # A simple listing of the directory files, sizes and times is good
+ if os.path.exists(tempDir):
+ for fname in os.listdir(tempDir):
+ fullpath = os.path.join(tempDir, fname)
+ sz = os.path.getsize(fullpath)
+ tm = os.path.getmtime(fullpath)
+ source2.append([fname, sz, tm])
+
+ # On Linux we also throw in Xorg.0.log
+ for f in extraFiles:
+ if os.path.exists(f):
+ with open(f,'rb') as infile:
+ source2.append(hash256(infile.read()))
+
+ if len(source2)==0:
+ LOGWARN('Second source of supplemental entropy will be empty')
+
+ except:
+ LOGEXCEPT('Error getting extra entropy from filesystem')
+
+
+ source3 = ''
+ try:
+ pixDesk = QPixmap.grabWindow(QApplication.desktop().winId())
+ pixRaw = QByteArray()
+ pixBuf = QBuffer(pixRaw)
+ pixBuf.open(QIODevice.WriteOnly)
+ pixDesk.save(pixBuf, 'PNG')
+ source3 = pixBuf.buffer().toHex()
+ except:
+ LOGEXCEPT('Third source of entropy (desktop screenshot) failed')
+
+ if len(source3)==0:
+ LOGWARN('Error getting extra entropy from screenshot')
+
+ LOGINFO('Adding %d keypress events to the entropy pool', len(source1)/3)
+ LOGINFO('Adding %s bytes of filesystem data to the entropy pool',
+ bytesToHumanSize(len(str(source2))))
+ LOGINFO('Adding %s bytes from desktop screenshot to the entropy pool',
+ bytesToHumanSize(len(str(source3))/2))
+
+
+ allEntropy = ''.join([str(a) for a in [source1, source2, source3]])
+ return SecureBinaryData(HMAC256('Armory Entropy', allEntropy))
+
+
+
+
####################################################
def rescanNextLoad(self):
reply = QMessageBox.warning(self, tr('Queue Rescan?'), tr("""
The next time you restart Armory, it will rescan the blockchain
- database, and reconstruct your wallet histories from scratch.
+ database, and reconstruct your wallet histories from scratch.
The rescan will take 10-60 minutes depending on your system.
Do you wish to force a rescan on the next Armory restart?"""), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- touchFile( os.path.join(ARMORY_HOME_DIR, 'rescan.txt') )
+ touchFile( os.path.join(ARMORY_HOME_DIR, 'rescan.flag') )
####################################################
def rebuildNextLoad(self):
reply = QMessageBox.warning(self, tr('Queue Rebuild?'), tr("""
- The next time you restart Armory, it will rebuild and rescan
- the entire blockchain database. This operation can take between
+ The next time you restart Armory, it will rebuild and rescan
+ the entire blockchain database. This operation can take between
30 minutes and 4 hours depending on you system speed.
Do you wish to force a rebuild on the next Armory restart?"""), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.txt') )
+ touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
####################################################
def loadFailedManyTimesFunc(self, nFail):
"""
- For now, if the user is having trouble loading the blockchain, all
- we do is delete mempool.bin (which is frequently corrupted but not
- detected as such. However, we may expand this in the future, if
+ For now, if the user is having trouble loading the blockchain, all
+ we do is delete mempool.bin (which is frequently corrupted but not
+ detected as such. However, we may expand this in the future, if
it's determined that more-complicated things are necessary.
"""
LOGERROR('%d attempts to load blockchain failed. Remove mempool.bin.' % nFail)
mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
- if os.path.exists(mempoolfile):
+ if os.path.exists(mempoolfile):
os.remove(mempoolfile)
else:
LOGERROR('File mempool.bin does not exist. Nothing deleted.')
@@ -730,7 +954,7 @@ def changeNumShow(self):
else:
self.currLedgMax = self.currLedgMin + prefWidth - 1
self.currLedgWidth = prefWidth
-
+
self.applyLedgerRange()
@@ -765,7 +989,7 @@ def applyLedgerRange(self):
self.btnLedgDn.setVisible(self.currLedgMax!=self.ledgerSize)
self.createCombinedLedger()
-
+
####################################################
@@ -812,7 +1036,7 @@ def trayRecv():
#############################################################################
@AllowAsync
def registerBitcoinWithFF(self):
- #the 3 nodes needed to add to register bitcoin as a protocol in FF
+ #the 3 nodes needed to add to register bitcoin as a protocol in FF
rdfschemehandler = 'about=\"urn:scheme:handler:bitcoin\"'
rdfscheme = 'about=\"urn:scheme:bitcoin\"'
rdfexternalApp = 'about=\"urn:scheme:externalApplication:bitcoin\"'
@@ -820,7 +1044,7 @@ def registerBitcoinWithFF(self):
#find mimeTypes.rdf file
home = os.getenv('HOME')
out,err = execAndWait('find %s -type f -name \"mimeTypes.rdf\"' % home)
-
+
for rdfs in out.split('\n'):
if rdfs:
try:
@@ -839,13 +1063,13 @@ def registerBitcoinWithFF(self):
rdfsch=i
elif rdfscheme in line:
rdfsc=i
- elif rdfexternalApp in line:
+ elif rdfexternalApp in line:
rdfea=i
i+=1
#seek to end of file
FFrdf.seek(-11, 2)
- i=0;
+ i=0;
#add the missing nodes
if rdfsch == -1:
@@ -855,23 +1079,23 @@ def registerBitcoinWithFF(self):
FFrdf.write(' \n')
FFrdf.write(' \n')
i+=1
-
+
if rdfsc == -1:
FFrdf.write(' \n')
FFrdf.write(' \n')
FFrdf.write(' \n')
i+=1
-
+
if rdfea == -1:
FFrdf.write(' \n')
+ FFrdf.write(' NC:path=\"/usr/bin/xdg-open\" />\n')
i+=1
-
+
if i != 0:
FFrdf.write('\n')
-
+
FFrdf.close()
#############################################################################
@@ -881,10 +1105,13 @@ def setupUriRegistration(self, justDoIt=False):
"""
LOGINFO('setupUriRegistration')
+ if USE_TESTNET:
+ return
+
if OS_LINUX:
out,err = execAndWait('gconftool-2 --get /desktop/gnome/url-handlers/bitcoin/command')
out2,err = execAndWait('xdg-mime query default x-scheme-handler/bitcoin')
-
+
#check FF protocol association
#checkFF_thread = threading.Thread(target=self.registerBitcoinWithFF)
#checkFF_thread.start()
@@ -903,7 +1130,7 @@ def setAsDefault():
setAsDefault()
elif (not 'armory' in out.lower() or not 'armory.desktop' in out2.lower()) and not self.firstLoad:
# If another application has it, ask for permission to change it
- # Don't bother the user on the first load with it if verification is
+ # Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
if not self.getSettingOrSetDefault('DNAA_DefaultApp', False):
reply = MsgBoxWithDNAA(MSGBOX.Question, 'Default URL Handler', \
@@ -920,14 +1147,22 @@ def setAsDefault():
action = 'DoNothing'
modulepathname = '"'
if getattr(sys, 'frozen', False):
- app_dir = os.path.dirname(sys.executable)
- app_path = os.path.join(app_dir, sys.executable)
+ app_dir = os.path.dirname(sys.executable)
+ app_path = os.path.join(app_dir, sys.executable)
elif __file__:
- return #running from a .py script, not gonna register URI on Windows
+ return #running from a .py script, not gonna register URI on Windows
- modulepathname += app_path + '" %1'
- LOGWARN("running from: %s, key: %s", app_path, modulepathname)
-
+ #justDoIt = True
+ import ctypes
+ GetModuleFileNameW = ctypes.windll.kernel32.GetModuleFileNameW
+ GetModuleFileNameW.restype = ctypes.c_int
+ app_path = ctypes.create_string_buffer(1024)
+ rtlength = ctypes.c_int()
+ rtlength = GetModuleFileNameW(None, ctypes.byref(app_path), 1024)
+ passstr = str(app_path.raw)
+
+ modulepathname += unicode(passstr[0:(rtlength*2)], encoding='utf16') + u'" "%1"'
+ modulepathname = modulepathname.encode('utf8')
rootKey = 'bitcoin\\shell\\open\\command'
try:
@@ -968,7 +1203,7 @@ def setAsDefault():
action = 'DoIt'
elif action=='AskUser' and not self.firstLoad and not dontAsk:
# If another application has it, ask for permission to change it
- # Don't bother the user on the first load with it if verification is
+ # Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
reply = MsgBoxWithDNAA(MSGBOX.Question, 'Default URL Handler', \
'Armory is not set as your default application for handling '
@@ -984,23 +1219,19 @@ def setAsDefault():
action = 'DoIt'
else:
LOGINFO('User requested not to use Armory as URI handler')
- return
+ return
# Finally, do it if we're supposed to!
LOGINFO('URL-register action: %s', action)
if action=='DoIt':
-
+
LOGINFO('Registering Armory for current user')
- baseDir = app_dir
+ baseDir = os.path.dirname(unicode(passstr[0:(rtlength*2)], encoding='utf16'))
regKeys = []
regKeys.append(['Software\\Classes\\bitcoin', '', 'URL:bitcoin Protocol'])
regKeys.append(['Software\\Classes\\bitcoin', 'URL Protocol', ""])
regKeys.append(['Software\\Classes\\bitcoin\\shell', '', None])
regKeys.append(['Software\\Classes\\bitcoin\\shell\\open', '', None])
- regKeys.append(['Software\\Classes\\bitcoin\\shell\\open\\command', '', \
- modulepathname])
- regKeys.append(['Software\\Classes\\bitcoin\\DefaultIcon', '', \
- '"%s\\armory48x48.ico"' % baseDir])
for key,name,val in regKeys:
dkey = '%s\\%s' % (key,name)
@@ -1009,9 +1240,19 @@ def setAsDefault():
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
- LOGWARN('app dir: %s', app_dir)
-
-
+ regKeysU = []
+ regKeysU.append(['Software\\Classes\\bitcoin\\shell\\open\\command', '', \
+ modulepathname])
+ regKeysU.append(['Software\\Classes\\bitcoin\\DefaultIcon', '', \
+ '"%s\\armory48x48.ico"' % baseDir])
+ for key,name,val in regKeysU:
+ dkey = '%s\\%s' % (key,name)
+ LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
+ registryKey = CreateKey(HKEY_CURRENT_USER, key)
+ #hKey = ctypes.c_int(registryKey.handle)
+ #ctypes.windll.Advapi32.RegSetValueEx(hKey, None, 0, REG_SZ, val, (len(val)+1))
+ SetValueEx(registryKey, name, 0, REG_SZ, val)
+ CloseKey(registryKey)
#############################################################################
def execOfflineTx(self):
@@ -1020,28 +1261,9 @@ def execOfflineTx(self):
# If we got here, one of three buttons was clicked.
if dlgSelect.do_create:
- selectWlt = []
- for wltID in self.walletIDList:
- if self.walletMap[wltID].watchingOnly:
- selectWlt.append(wltID)
- dlg = DlgWalletSelect(self, self, 'Wallet for Offline Transaction (watching-only list)', \
- wltIDList=selectWlt)
- if not dlg.exec_():
- return
- else:
- wltID = dlg.selectedID
- wlt = self.walletMap[wltID]
- dlgSend = DlgSendBitcoins(wlt, self, self)
- dlgSend.exec_()
- return
-
- elif dlgSelect.do_review:
- dlg = DlgReviewOfflineTx(self,self)
- dlg.exec_()
-
+ DlgSendBitcoins(self.getSelectedWallet(), self, self, onlyOfflineWallets=True).exec_()
elif dlgSelect.do_broadc:
- dlg = DlgReviewOfflineTx(self,self)
- dlg.exec_()
+ DlgSignBroadcastOfflineTx(self,self).exec_()
#############################################################################
@@ -1068,13 +1290,13 @@ def execIntroDialog(self):
self.writeSetting('DNAA_IntroDialog', True)
if dlg.requestCreate:
- self.createNewWallet(initLabel='Primary Wallet')
+ self.startWalletWizard()
if dlg.requestImport:
self.execImportWallet()
-
+
#############################################################################
def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=False):
if changePass:
@@ -1086,7 +1308,7 @@ def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=Fal
fn = 'armory_%s_%s.watchonly.wallet' % (wlt.uniqueIDB58, suffix)
savePath = unicode(self.getFileSave(defaultFilename=fn))
if not len(savePath)>0:
- return
+ return False
if copyType.lower()=='same':
wlt.writeFreshWalletFile(savePath)
@@ -1094,7 +1316,7 @@ def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=Fal
if wlt.useEncryption:
dlg = DlgUnlockWallet(wlt, parent, self, 'Unlock Private Keys')
if not dlg.exec_():
- return
+ return False
# Wallet should now be unlocked
wlt.makeUnencryptedWalletCopy(savePath)
elif copyType.lower()=='encrypt':
@@ -1103,20 +1325,21 @@ def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=Fal
dlgCrypt = DlgChangePassphrase(parent, self, not wlt.useEncryption)
if not dlgCrypt.exec_():
QMessageBox.information(parent, tr('Aborted'), tr("""
- No passphrase was selected for the encrypted backup.
+ No passphrase was selected for the encrypted backup.
No backup was created"""), QMessageBox.Ok)
newPassphrase = SecureBinaryData(str(dlgCrypt.edtPasswd1.text()))
wlt.makeEncryptedWalletCopy(savePath, newPassphrase)
else:
LOGERROR('Invalid "copyType" supplied to makeWalletCopy: %s', copyType)
- return
+ return False
QMessageBox.information(parent, tr('Backup Complete'), tr("""
- Your wallet was successfully backed up to the following
+ Your wallet was successfully backed up to the following
location:
%s""") % savePath, QMessageBox.Ok)
-
-
+ return True
+
+
#############################################################################
def createAction(self, txt, slot, isCheckable=False, \
ttip=None, iconpath=None, shortcut=None):
@@ -1127,8 +1350,8 @@ def createAction(self, txt, slot, isCheckable=False, \
if iconpath:
icon = QIcon(iconpath)
- theAction = QAction(icon, txt, self)
-
+ theAction = QAction(icon, txt, self)
+
if isCheckable:
theAction.setCheckable(True)
self.connect(theAction, SIGNAL('toggled(bool)'), slot)
@@ -1141,7 +1364,7 @@ def createAction(self, txt, slot, isCheckable=False, \
if shortcut:
theAction.setShortcut(shortcut)
-
+
return theAction
@@ -1164,7 +1387,7 @@ def setUserMode(self, mode):
'the new usermode to go into effect.', QMessageBox.Ok)
self.firstModeSwitch = False
-
+
#############################################################################
@@ -1192,161 +1415,414 @@ def setPreferredDateFormat(self, fmtStr):
return True
+
#############################################################################
- def checkForLatestVersion(self, wasRequested=False):
- LOGDEBUG('checkForLatestVersion')
- # Download latest versions.txt file, accumulate changelog
- if CLI_OPTIONS.skipVerCheck:
- return
+ def setupAnnouncementFetcher(self):
+ skipChk1 = self.getSettingOrSetDefault('SkipAnnounceCheck', False)
+ skipChk2 = CLI_OPTIONS.skipAnnounceCheck
+ skipChk3 = CLI_OPTIONS.offline and not CLI_OPTIONS.testAnnounceCode
+ self.skipAnnounceCheck = skipChk1 or skipChk2 or skipChk3
+
+ url1 = ANNOUNCE_URL
+ url2 = ANNOUNCE_URL_BACKUP
+ fetchPath = os.path.join(ARMORY_HOME_DIR, 'atisignedannounce')
+ if self.announceFetcher is None:
+ self.announceFetcher = AnnounceDataFetcher(url1, url2, fetchPath)
+ self.announceFetcher.setDisabled(self.skipAnnounceCheck)
+ self.announceFetcher.start()
+
+ # Set last-updated vals to zero to force processing at startup
+ for fid in ['changelog, downloads','notify','bootstrap']:
+ self.lastAnnounceUpdate[fid] = 0
+
+ # If we recently updated the settings to enable or disable checking...
+ if not self.announceFetcher.isRunning() and not self.skipAnnounceCheck:
+ self.announceFetcher.setDisabled(False)
+ self.announceFetcher.setFetchInterval(DEFAULT_FETCH_INTERVAL)
+ self.announceFetcher.start()
+ elif self.announceFetcher.isRunning() and self.skipAnnounceCheck:
+ self.announceFetcher.setDisabled(True)
+ self.announceFetcher.shutdown()
- optChkVer = self.getSettingOrSetDefault('CheckVersion', 'Always')
- if optChkVer.lower()=='never' and not wasRequested:
- LOGINFO('User requested never check for new versions')
- return
- if wasRequested and not self.internetAvail:
- QMessageBox.critical(self, 'Offline Mode', \
- 'You are in offline mode, which means that version information '
- 'cannot be retrieved from the internet. Please visit '
- 'www.bitcoinarmory.com from an internet-connected computer '
- 'to get the latest version information.', QMessageBox.Ok)
- return
- versionFile = None
- try:
- import urllib2
- import socket
- socket.setdefaulttimeout(CLI_OPTIONS.nettimeout)
- versionLines = urllib2.urlopen(HTTP_VERSION_FILE, timeout=CLI_OPTIONS.nettimeout)
- versionLines = versionLines.readlines()
- except ImportError:
- LOGERROR('No module urllib2 -- cannot get latest version')
- return
- except (urllib2.URLError, urllib2.HTTPError):
- if wasRequested:
- QMessageBox.critical(self, 'Unavailable', \
- 'The latest Armory version information could not be retrieved.'
- 'Please check www.bitcoinarmory.com for the latest version '
- 'information.', QMessageBox.Ok)
- LOGERROR('Could not access latest Armory version information')
- LOGERROR('Tried: %s', HTTP_VERSION_FILE)
- return
-
+ #############################################################################
+ def processAnnounceData(self, forceCheck=False, forceWait=5):
+
+ adf = self.announceFetcher
+
+
+
+ # The ADF always fetches everything all the time. If forced, do the
+ # regular fetch first, then examine the individual files without forcing
+ if forceCheck:
+ adf.fetchRightNow(forceWait)
+
+ # Check each of the individual files for recent modifications
+ idFuncPairs = [
+ ['announce', self.updateAnnounceTab],
+ ['changelog', self.processChangelog],
+ ['downloads', self.processDownloads],
+ ['notify', self.processNotifications],
+ ['bootstrap', self.processBootstrap] ]
+
+ # If modified recently
+ for fid,func in idFuncPairs:
+ if not fid in self.lastAnnounceUpdate or \
+ adf.getFileModTime(fid) > self.lastAnnounceUpdate[fid]:
+ self.lastAnnounceUpdate[fid] = RightNow()
+ fileText = adf.getAnnounceFile(fid)
+ func(fileText)
- skipVerify = False
- #LOGERROR('**********************************TESTING CODE: REMOVE ME')
- #versionLines = open('versions.txt','r').readlines()
- #skipVerify = True
- #LOGERROR('**********************************TESTING CODE: REMOVE ME')
+
+
+ #############################################################################
+ def processChangelog(self, txt):
try:
- currLineIdx = [0]
+ clp = changelogParser()
+ self.changelog = clp.parseChangelogText(txt)
+ except:
+ # Don't crash on an error, but do log what happened
+ LOGEXCEPT('Failed to parse changelog data')
- def popNextLine(currIdx):
- if currIdx[0] < len(versionLines):
- outstr = versionLines[ currIdx[0] ]
- currIdx[0] += 1
- return outstr.strip()
- else:
- return None
-
- thisVerString = getVersionString(BTCARMORY_VERSION)
- changeLog = []
- vernum = ''
-
- line = popNextLine(currLineIdx)
- comments = ''
- while line != None:
- if not line.startswith('#') and len(line)>0:
- if line.startswith('VERSION'):
- vstr = line.split(' ')[-1]
- myVersionInt = getVersionInt(readVersionString(thisVerString))
- latestVerInt = getVersionInt(readVersionString(vstr))
- if myVersionInt>=latestVerInt and not wasRequested:
- break
- changeLog.append([vstr, []])
- elif line.startswith('-'):
- featureTitle = line[2:]
- changeLog[-1][1].append([featureTitle, []])
- else:
- changeLog[-1][1][-1][1].append(line)
- if line.startswith('#'):
- comments += line+'\n'
- line = popNextLine(currLineIdx)
-
- # We also store the list of latest
- self.latestVer = {}
- self.downloadDict = {}
- try:
- msg = extractSignedDataFromVersionsDotTxt(comments, doVerify=(not skipVerify))
- if len(msg)>0:
- dldict,verstrs = parseLinkList(msg)
- self.downloadDict = dldict.copy()
- self.latestVer = verstrs.copy()
- if not TheBDM.getBDMState()=='BlockchainReady':
- # Don't dump all this info to the log all the time
- LOGINFO('Latest versions:')
- LOGINFO(' Satoshi: %s', self.latestVer['SATOSHI'])
- LOGINFO(' Armory: %s', self.latestVer['ARMORY'])
- else:
- raise ECDSA_Error, 'Could not verify'
- except:
- LOGEXCEPT('Version check error, ignoring downloaded version info')
-
-
- if len(changeLog)==0 and not wasRequested:
- LOGINFO('You are running the latest version!')
- elif optChkVer[1:]==changeLog[0][0] and not wasRequested:
- LOGINFO('Latest version is %s -- Notify user on next version.', optChkVer)
+ #############################################################################
+ def processDownloads(self, txt):
+ try:
+ dlp = downloadLinkParser()
+ self.downloadLinks = dlp.parseDownloadList(txt)
+
+ if self.downloadLinks is None:
return
+
+ thisVer = getVersionInt(BTCARMORY_VERSION)
+
+ # Check ARMORY versions
+ if not 'Armory' in self.downloadLinks:
+ LOGWARN('No Armory links in the downloads list')
+ else:
+ maxVer = 0
+ self.versionNotification = {}
+ for verStr,vermap in self.downloadLinks['Armory'].iteritems():
+ dlVer = getVersionInt(readVersionString(verStr))
+ if dlVer > maxVer:
+ maxVer = dlVer
+ self.armoryVersions[1] = verStr
+ if thisVer >= maxVer:
+ continue
+
+ shortDescr = tr('Armory version %s is now available!') % verStr
+ notifyID = binary_to_hex(hash256(shortDescr)[:4])
+ self.versionNotification['UNIQUEID'] = notifyID
+ self.versionNotification['VERSION'] = '0'
+ self.versionNotification['STARTTIME'] = '0'
+ self.versionNotification['EXPIRES'] = '%d' % long(UINT64_MAX)
+ self.versionNotification['CANCELID'] = '[]'
+ self.versionNotification['MINVERSION'] = '*'
+ self.versionNotification['MAXVERSION'] = '<%s' % verStr
+ self.versionNotification['PRIORITY'] = '3072'
+ self.versionNotification['ALERTTYPE'] = 'Upgrade'
+ self.versionNotification['NOTIFYSEND'] = 'False'
+ self.versionNotification['NOTIFYRECV'] = 'False'
+ self.versionNotification['SHORTDESCR'] = shortDescr
+ self.versionNotification['LONGDESCR'] = \
+ self.getVersionNotifyLongDescr(verStr).replace('\n',' ')
+
+ if 'ArmoryTesting' in self.downloadLinks:
+ for verStr,vermap in self.downloadLinks['ArmoryTesting'].iteritems():
+ dlVer = getVersionInt(readVersionString(verStr))
+ if dlVer > maxVer:
+ maxVer = dlVer
+ self.armoryVersions[1] = verStr
+ if thisVer >= maxVer:
+ continue
+
+ shortDescr = tr('Armory Testing version %s is now available!') % verStr
+ notifyID = binary_to_hex(hash256(shortDescr)[:4])
+ self.versionNotification['UNIQUEID'] = notifyID
+ self.versionNotification['VERSION'] = '0'
+ self.versionNotification['STARTTIME'] = '0'
+ self.versionNotification['EXPIRES'] = '%d' % long(UINT64_MAX)
+ self.versionNotification['CANCELID'] = '[]'
+ self.versionNotification['MINVERSION'] = '*'
+ self.versionNotification['MAXVERSION'] = '<%s' % verStr
+ self.versionNotification['PRIORITY'] = '1024'
+ self.versionNotification['ALERTTYPE'] = 'upgrade-testing'
+ self.versionNotification['NOTIFYSEND'] = 'False'
+ self.versionNotification['NOTIFYRECV'] = 'False'
+ self.versionNotification['SHORTDESCR'] = shortDescr
+ self.versionNotification['LONGDESCR'] = \
+ self.getVersionNotifyLongDescr(verStr, True).replace('\n',' ')
+
+
+ # For Satoshi updates, we don't trigger any notifications like we
+ # do for Armory above -- we will release a proper announcement if
+ # necessary. But we want to set a flag to
+ if not 'Satoshi' in self.downloadLinks:
+ LOGWARN('No Satoshi links in the downloads list')
else:
- DlgVersionNotify(self,self, changeLog, wasRequested).exec_()
+ try:
+ maxVer = 0
+ for verStr,vermap in self.downloadLinks['Satoshi'].iteritems():
+ dlVer = getVersionInt(readVersionString(verStr))
+ if dlVer > maxVer:
+ maxVer = dlVer
+ self.satoshiVersions[1] = verStr
+
+ if not self.NetworkingFactory:
+ return
+
+ # This is to detect the running versions of Bitcoin-Qt/bitcoind
+ thisVerStr = self.NetworkingFactory.proto.peerInfo['subver']
+ thisVerStr = thisVerStr.strip('/').split(':')[-1]
+
+ if sum([0 if c in '0123456789.' else 1 for c in thisVerStr]) > 0:
+ return
+
+ self.satoshiVersions[0] = thisVerStr
+
+ except:
+ pass
+
+
+
+
except:
- if wasRequested:
- QMessageBox.critical(self, 'Parse Error', \
- 'The version information is malformed and cannot be understood. '
- 'Please check www.bitcoinarmory.com for the latest version '
- 'information.', QMessageBox.Ok)
- LOGEXCEPT('Error trying to parse versions.txt file')
-
+ # Don't crash on an error, but do log what happened
+ LOGEXCEPT('Failed to parse download link data')
+
#############################################################################
- def setupNetworking(self):
- LOGINFO('Setting up networking...')
- TimerStart('setupNetworking')
- self.internetAvail = False
+ def getVersionNotifyLongDescr(self, verStr, testing=False):
+ shortOS = None
+ if OS_WINDOWS:
+ shortOS = 'windows'
+ elif OS_LINUX:
+ shortOS = 'ubuntu'
+ elif OS_MACOSX:
+ shortOS = 'mac'
- # Prevent Armory from being opened twice
- from twisted.internet import reactor
- import twisted
- def uriClick_partial(a):
- self.uriLinkClicked(a)
+ webURL = 'https://bitcoinarmory.com/download/'
+ if shortOS is not None:
+ webURL += '#' + shortOS
- if CLI_OPTIONS.interport > 1:
- try:
- self.InstanceListener = ArmoryListenerFactory(self.bringArmoryToFront, \
- uriClick_partial )
- reactor.listenTCP(CLI_OPTIONS.interport, self.InstanceListener)
- except twisted.internet.error.CannotListenError:
- LOGWARN('Socket already occupied! This must be a duplicate Armory instance!')
- QMessageBox.warning(self, 'Only One, Please!', \
- 'Armory is already running! You can only have one instance open '
- 'at a time. Aborting...', QMessageBox.Ok)
- os._exit(0)
- else:
- LOGWARN('*** Listening port is disabled. URI-handling will not work')
-
+ if testing:
+ return tr("""
+ A new testing version of Armory is out. You can upgrade to version
+ %s through our secure downloader inside Armory (link at the bottom
+ of this notification window).
+ """) % (verStr)
+
+ return tr("""
+ Your version of Armory is now outdated. Please upgrade to version
+ %s through our secure downloader inside Armory (link at the bottom
+ of this notification window). Alternatively, you can get the new
+ version from our website downloads page at:
+
+ %s """) % (verStr, webURL, webURL)
- settingSkipCheck = self.getSettingOrSetDefault('SkipOnlineCheck', False)
- self.forceOnline = CLI_OPTIONS.forceOnline or settingSkipCheck
- if self.forceOnline:
- LOGINFO('Forced online mode: True')
- # Check general internet connection
- self.internetAvail = False
- if not self.forceOnline:
+
+ #############################################################################
+ def processBootstrap(self, binFile):
+ # Nothing to process, actually. We'll grab the bootstrap from its
+ # current location, if needed
+ pass
+
+
+
+ #############################################################################
+ def notificationIsRelevant(self, notifyID, notifyMap):
+ currTime = RightNow()
+ thisVerInt = getVersionInt(BTCARMORY_VERSION)
+
+ # Ignore transactions below the requested priority
+ minPriority = self.getSettingOrSetDefault('NotifyMinPriority', 2048)
+ if int(notifyMap['PRIORITY']) < minPriority:
+ return False
+
+ # Ignore version upgrade notifications if disabled in the settings
+ if 'upgrade' in notifyMap['ALERTTYPE'].lower() and \
+ self.getSettingOrSetDefault('DisableUpgradeNotify', False):
+ return False
+
+ if notifyID in self.notifyIgnoreShort:
+ return False
+
+ if notifyMap['STARTTIME'].isdigit():
+ if currTime < long(notifyMap['STARTTIME']):
+ return False
+
+ if notifyMap['EXPIRES'].isdigit():
+ if currTime > long(notifyMap['EXPIRES']):
+ return False
+
+
+ try:
+ minVerStr = notifyMap['MINVERSION']
+ minExclude = minVerStr.startswith('>')
+ minVerStr = minVerStr[1:] if minExclude else minVerStr
+ minVerInt = getVersionInt(readVersionString(minVerStr))
+ minVerInt += 1 if minExclude else 0
+ if thisVerInt < minVerInt:
+ return False
+ except:
+ pass
+
+
+ try:
+ maxVerStr = notifyMap['MAXVERSION']
+ maxExclude = maxVerStr.startswith('<')
+ maxVerStr = maxVerStr[1:] if maxExclude else maxVerStr
+ maxVerInt = getVersionInt(readVersionString(maxVerStr))
+ maxVerInt -= 1 if maxExclude else 0
+ if thisVerInt > maxVerInt:
+ return False
+ except:
+ pass
+
+ return True
+
+
+ #############################################################################
+ def processNotifications(self, txt):
+
+ # Keep in mind this will always be run on startup with a blank slate, as
+ # well as every 30 min while Armory is running. All notifications are
+ # "new" on startup (though we will allow the user to do-not-show-again
+ # and store the notification ID in the settings file).
+ try:
+ np = notificationParser()
+ currNotificationList = np.parseNotificationText(txt)
+ except:
+ # Don't crash on an error, but do log what happened
+ LOGEXCEPT('Failed to parse notifications')
+
+ if currNotificationList is None:
+ currNotificationList = {}
+
+      # If we have a new-version notification, it's not ignored, and such
+ # notifications are not disabled, add it to the list
+ vnotify = self.versionNotification
+ if vnotify and 'UNIQUEID' in vnotify:
+ currNotificationList[vnotify['UNIQUEID']] = deepcopy(vnotify)
+
+ # Create a copy of almost all the notifications we have.
+      # All notifications >= 2048, unless they've explicitly allowed testing
+ # notifications. This will be shown on the "Announcements" tab.
+ self.almostFullNotificationList = {}
+ currMin = self.getSettingOrSetDefault('NotifyMinPriority', \
+ DEFAULT_MIN_PRIORITY)
+ minmin = min(currMin, DEFAULT_MIN_PRIORITY)
+ for nid,valmap in currNotificationList.iteritems():
+ if int(valmap['PRIORITY']) >= minmin:
+ self.almostFullNotificationList[nid] = deepcopy(valmap)
+
+
+ tabPriority = 0
+ self.maxPriorityID = None
+
+ # Check for new notifications
+ addedNotifyIDs = set()
+ irrelevantIDs = set()
+ for nid,valmap in currNotificationList.iteritems():
+ if not self.notificationIsRelevant(nid, valmap):
+ # Can't remove while iterating over the map
+ irrelevantIDs.add(nid)
+ self.notifyIgnoreShort.add(nid)
+ continue
+
+ if valmap['PRIORITY'].isdigit():
+ if int(valmap['PRIORITY']) > tabPriority:
+ tabPriority = int(valmap['PRIORITY'])
+ self.maxPriorityID = nid
+
+ if not nid in self.almostFullNotificationList:
+ addedNotifyIDs.append(nid)
+
+ # Now remove them from the set that we are working with
+ for nid in irrelevantIDs:
+ del currNotificationList[nid]
+
+      # Check for notifications we had before but no longer have
+ removedNotifyIDs = []
+ for nid,valmap in self.almostFullNotificationList.iteritems():
+ if not nid in currNotificationList:
+ removedNotifyIDs.append(nid)
+
+
+ #for nid in removedNotifyIDs:
+ #self.notifyIgnoreShort.discard(nid)
+ #self.notifyIgnoreLong.discard(nid)
+
+
+
+ # Change the "Announcements" tab color if something important is there
+ tabWidgetBar = self.mainDisplayTabs.tabBar()
+ tabColor = Colors.Foreground
+ if tabPriority >= 5120:
+ tabColor = Colors.TextRed
+ elif tabPriority >= 4096:
+ tabColor = Colors.TextRed
+ elif tabPriority >= 3072:
+ tabColor = Colors.TextBlue
+ elif tabPriority >= 2048:
+ tabColor = Colors.TextBlue
+
+ tabWidgetBar.setTabTextColor(self.MAINTABS.Announce, tabColor)
+ self.updateAnnounceTab()
+
+ # We only do popups for notifications >=4096, AND upgrade notify
+ if tabPriority >= 3072:
+ DlgNotificationWithDNAA(self, self, self.maxPriorityID, \
+ currNotificationList[self.maxPriorityID]).show()
+ elif vnotify:
+ if not vnotify['UNIQUEID'] in self.notifyIgnoreShort:
+ DlgNotificationWithDNAA(self,self,vnotify['UNIQUEID'],vnotify).show()
+
+
+
+
+
+
+
+ #############################################################################
+ @TimeThisFunction
+ def setupNetworking(self):
+ LOGINFO('Setting up networking...')
+ self.internetAvail = False
+
+ # Prevent Armory from being opened twice
+ from twisted.internet import reactor
+ import twisted
+ def uriClick_partial(a):
+ self.uriLinkClicked(a)
+
+ if CLI_OPTIONS.interport > 1:
+ try:
+ self.InstanceListener = ArmoryListenerFactory(self.bringArmoryToFront, \
+ uriClick_partial )
+ reactor.listenTCP(CLI_OPTIONS.interport, self.InstanceListener)
+ except twisted.internet.error.CannotListenError:
+ LOGWARN('Socket already occupied! This must be a duplicate Armory')
+ QMessageBox.warning(self, tr('Already Open'), tr("""
+ Armory is already running! You can only have one Armory open
+ at a time. Exiting..."""), QMessageBox.Ok)
+ os._exit(0)
+ else:
+ LOGWARN('*** Listening port is disabled. URI-handling will not work')
+
+
+ settingSkipCheck = self.getSettingOrSetDefault('SkipOnlineCheck', False)
+ self.forceOnline = CLI_OPTIONS.forceOnline or settingSkipCheck
+ if self.forceOnline:
+ LOGINFO('Forced online mode: True')
+
+ # Check general internet connection
+ self.internetAvail = False
+ if not self.forceOnline:
try:
import urllib2
response=urllib2.urlopen('http://google.com', timeout=CLI_OPTIONS.nettimeout)
@@ -1365,7 +1841,7 @@ def uriClick_partial(a):
LOGEXCEPT('Error checking for internet connection')
LOGERROR('Run --skip-online-check if you think this is an error')
self.internetAvail = False
-
+
LOGINFO('Internet connection is Available: %s', self.internetAvail)
LOGINFO('Bitcoin-Qt/bitcoind is Available: %s', self.bitcoindIsAvailable())
@@ -1373,7 +1849,119 @@ def uriClick_partial(a):
LOGINFO('Online mode currently possible: %s', self.onlineModeIsPossible())
- TimerStop('setupNetworking')
+
+
+
+ #############################################################################
+ def manageBitcoindAskTorrent(self):
+
+ if not satoshiIsAvailable():
+ reply = MsgBoxCustom(MSGBOX.Question, tr('BitTorrent Option'), tr("""
+ You are currently configured to run the core Bitcoin software
+ yourself (Bitcoin-Qt or bitcoind). Normally, you should
+ start the Bitcoin software first and wait for it to synchronize
+ with the network before starting Armory.
+
+ However, Armory can shortcut most of this initial
+ synchronization
+ for you using BitTorrent. If your firewall allows it,
+ using BitTorrent can be an order of magnitude faster (2x to 20x)
+ than letting the Bitcoin software download it via P2P.
+
+ To synchronize using BitTorrent (recommended):
+ Click "Use BitTorrent" below, and do not start the Bitcoin
+ software until after it is complete.
+
+ To synchronize using Bitcoin P2P (fallback):
+ Click "Cancel" below, then close Armory and start Bitcoin-Qt
+ (or bitcoind). Do not start Armory until you see a green checkmark
+ in the bottom-right corner of the Bitcoin-Qt window."""), \
+ wCancel=True, yesStr='Use BitTorrent')
+
+ if not reply:
+ QMessageBox.warning(self, tr('Synchronize'), tr("""
+ When you are ready to start synchronization, close Armory and
+ start Bitcoin-Qt or bitcoind. Restart Armory only when
+ synchronization is complete. If using Bitcoin-Qt, you will see
+ a green checkmark in the bottom-right corner"""), QMessageBox.Ok)
+ return False
+
+ else:
+ reply = MsgBoxCustom(MSGBOX.Question, tr('BitTorrent Option'), tr("""
+ You are currently running the core Bitcoin software, but it
+ is not fully synchronized with the network, yet. Normally,
+ you should close Armory until Bitcoin-Qt (or bitcoind) is
+ finished
+
+ However, Armory can speed up this initial
+ synchronization for you using BitTorrent. If your firewall
+ allows it, using BitTorrent can be an order of magnitude
+ faster (2x to 20x)
+ than letting the Bitcoin software download it via P2P.
+
+ To synchronize using BitTorrent (recommended):
+ Close the running Bitcoin software right now. When it is
+ closed, click "Use BitTorrent" below. Restart the Bitcoin software
+ when Armory indicates it is complete.
+
+ To synchronize using Bitcoin P2P (fallback):
+ Click "Cancel" below, and then close Armory until the Bitcoin
+ software is finished synchronizing. If using Bitcoin-Qt, you
+ will see a green checkmark in the bottom-right corner of the
+ main window."""), QMessageBox.Ok)
+
+ if reply:
+ if satoshiIsAvailable():
+ QMessageBox.warning(self, tr('Still Running'), tr("""
+ The Bitcoin software still appears to be open!
+ Close it right now
+ before clicking "Ok." The BitTorrent engine will start
+ as soon as you do."""), QMessageBox.Ok)
+ else:
+ QMessageBox.warning(self, tr('Synchronize'), tr("""
+ You chose to finish synchronizing with the network using
+ the Bitcoin software which is already running. Please close
+ Armory until it is finished. If you are running Bitcoin-Qt,
+ you will see a green checkmark in the bottom-right corner,
+ when it is time to open Armory again."""), QMessageBox.Ok)
+ return False
+
+ return True
+
+
+ ############################################################################
+ def findTorrentFileForSDM(self, forceWaitTime=0):
+ """
+ Hopefully the announcement fetcher has already gotten one for us,
+ or at least we have a default.
+ """
+
+ # Only do an explicit announce check if we have no bootstrap at all
+ # (don't need to spend time doing an explicit check if we have one)
+ if self.announceFetcher.getFileModTime('bootstrap') == 0:
+ if forceWaitTime>0:
+ self.explicitCheckAnnouncements(forceWaitTime)
+
+ # If it's still not there, look for a default file
+ if self.announceFetcher.getFileModTime('bootstrap') == 0:
+ LOGERROR('Could not get announce bootstrap; using default')
+ srcTorrent = os.path.join(GetExecDir(), '../default_bootstrap.torrent')
+ else:
+ srcTorrent = self.announceFetcher.getAnnounceFilePath('bootstrap')
+
+ # Maybe we still don't have a torrent for some reason
+ if not srcTorrent or not os.path.exists(srcTorrent):
+ return ''
+
+ torrentPath = os.path.join(ARMORY_HOME_DIR, 'bootstrap.dat.torrent')
+ LOGINFO('Using torrent file: ' + torrentPath)
+ shutil.copy(srcTorrent, torrentPath)
+
+ return torrentPath
+
+
+
+
############################################################################
def startBitcoindIfNecessary(self):
@@ -1382,29 +1970,39 @@ def startBitcoindIfNecessary(self):
LOGWARN('Not online, will not start bitcoind')
return False
- if not self.doManageSatoshi:
+ if not self.doAutoBitcoind:
LOGWARN('Tried to start bitcoind, but ManageSatoshi==False')
return False
if satoshiIsAvailable():
LOGWARN('Tried to start bitcoind, but satoshi already running')
return False
-
- self.setSatoshiPaths()
+ self.setSatoshiPaths()
TheSDM.setDisabled(False)
+
+ torrentIsDisabled = self.getSettingOrSetDefault('DisableTorrent', False)
+
+ # Give the SDM the torrent file...it will use it if it makes sense
+ if not torrentIsDisabled and TheSDM.shouldTryBootstrapTorrent():
+ torrentFile = self.findTorrentFileForSDM(2)
+ if not torrentFile or not os.path.exists(torrentFile):
+ LOGERROR('Could not find torrent file')
+ else:
+ TheSDM.tryToSetupTorrentDL(torrentFile)
+
+
try:
# "satexe" is actually just the install directory, not the direct
# path the executable. That dir tree will be searched for bitcoind
- TheSDM.setupSDM(None, self.satoshiHomePath, \
- extraExeSearch=self.satoshiExeSearchPath)
+ TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
TheSDM.startBitcoind()
LOGDEBUG('Bitcoind started without error')
return True
except:
LOGEXCEPT('Failed to setup SDM')
self.switchNetworkMode(NETWORKMODE.Offline)
-
+
############################################################################
def setSatoshiPaths(self):
@@ -1419,16 +2017,20 @@ def setSatoshiPaths(self):
else:
self.satoshiExeSearchPath = []
-
+
self.satoshiHomePath = BTC_HOME_DIR
if self.settings.hasSetting('SatoshiDatadir') and \
CLI_OPTIONS.satoshiHome=='DEFAULT':
# Setting override BTC_HOME_DIR only if it wasn't explicitly
- # set as the command line.
+            # set on the command line.
self.satoshiHomePath = self.settings.get('SatoshiDatadir')
-
+ LOGINFO('Setting satoshi datadir = %s' % self.satoshiHomePath)
+
TheBDM.setSatoshiDir(self.satoshiHomePath)
-
+ TheSDM.setSatoshiDir(self.satoshiHomePath)
+ TheTDM.setSatoshiDir(self.satoshiHomePath)
+
+
############################################################################
def loadBlockchainIfNecessary(self):
LOGINFO('loadBlockchainIfNecessary')
@@ -1453,7 +2055,7 @@ def loadBlockchainIfNecessary(self):
else:
self.switchNetworkMode(NETWORKMODE.Offline)
TheBDM.setOnlineMode(False, wait=False)
-
+
@@ -1473,7 +2075,7 @@ def onlineModeIsPossible(self):
def bitcoindIsAvailable(self):
return satoshiIsAvailable('127.0.0.1', BITCOIN_PORT)
-
+
#############################################################################
def switchNetworkMode(self, newMode):
@@ -1483,7 +2085,7 @@ def switchNetworkMode(self, newMode):
self.NetworkingFactory = FakeClientFactory()
return
elif newMode==NETWORKMODE.Full:
-
+
# Actually setup the networking, now
from twisted.internet import reactor
@@ -1493,8 +2095,8 @@ def showOfflineMsg():
self.lblArmoryStatus.setText( \
'Disconnected' % htmlColor('TextWarn'))
if not self.getSettingOrSetDefault('NotifyDiscon', not OS_MACOSX):
- return
-
+ return
+
try:
self.sysTray.showMessage('Disconnected', \
'Connection to Bitcoin-Qt client lost! Armory cannot send \n'
@@ -1509,11 +2111,11 @@ def showOnlineMsg():
self.netMode = NETWORKMODE.Full
self.setDashboardDetails()
self.lblArmoryStatus.setText(\
- 'Connected (%s blocks) ' %
+ 'Connected (%s blocks) ' %
(htmlColor('TextGreen'), self.currBlockNum))
if not self.getSettingOrSetDefault('NotifyReconn', not OS_MACOSX):
return
-
+
try:
if self.connectCount>0:
self.sysTray.showMessage('Connected', \
@@ -1522,9 +2124,10 @@ def showOnlineMsg():
self.connectCount += 1
except:
LOGEXCEPT('Failed to show reconnect notification')
-
-
+
+
self.NetworkingFactory = ArmoryClientFactory( \
+ TheBDM,
func_loseConnect=showOfflineMsg, \
func_madeConnect=showOnlineMsg, \
func_newTx=self.newTxFunc)
@@ -1532,7 +2135,7 @@ def showOnlineMsg():
reactor.callWhenRunning(reactor.connectTCP, '127.0.0.1', \
BITCOIN_PORT, self.NetworkingFactory)
-
+
#############################################################################
@@ -1556,7 +2159,7 @@ def parseUriLink(self, uriStr, clickOrEnter='click'):
uriDict = parseBitcoinURI(uriStr)
if TheBDM.getBDMState() in ('Offline','Uninitialized'):
LOGERROR('%sed "bitcoin:" link in offline mode.' % ClickOrEnter)
- self.bringArmoryToFront()
+ self.bringArmoryToFront()
QMessageBox.warning(self, 'Offline Mode',
'You %sed on a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
@@ -1564,7 +2167,7 @@ def parseUriLink(self, uriStr, clickOrEnter='click'):
'to the Bitcoin network!' % (clickOrEnter, ClickOrEnter), \
QMessageBox.Ok)
return {}
-
+
if len(uriDict)==0:
warnMsg = ('It looks like you just %sed a "bitcoin:" link, but '
'that link is malformed. ' % clickOrEnter)
@@ -1619,25 +2222,34 @@ def parseUriLink(self, uriStr, clickOrEnter='click'):
#############################################################################
def uriLinkClicked(self, uriStr):
LOGINFO('uriLinkClicked')
- if not TheBDM.getBDMState()=='BlockchainReady':
+ if TheBDM.getBDMState()=='Offline':
QMessageBox.warning(self, 'Offline', \
- 'You just clicked on a "bitcoin:" link, but Armory is offline '
+ 'You just clicked on a "bitcoin:" link, but Armory is offline '
'and cannot send transactions. Please click the link '
'again when Armory is online.', \
QMessageBox.Ok)
return
+ elif not TheBDM.getBDMState()=='BlockchainReady':
+         # BDM isn't ready yet; save URI strings in the delayed URI dict to
+         # call later through finishLoadBlockChain
+ qLen = self.delayedURIData['qLen']
+
+ self.delayedURIData[qLen] = uriStr
+ qLen = qLen +1
+ self.delayedURIData['qLen'] = qLen
+ return
uriDict = self.parseUriLink(uriStr, 'click')
-
+
if len(uriDict)>0:
- self.bringArmoryToFront()
+ self.bringArmoryToFront()
return self.uriSendBitcoins(uriDict)
-
+
#############################################################################
+ @TimeThisFunction
def loadWalletsAndSettings(self):
LOGINFO('loadWalletsAndSettings')
- TimerStart('loadWltSettings')
self.getSettingOrSetDefault('First_Load', True)
self.getSettingOrSetDefault('Load_Count', 0)
@@ -1658,7 +2270,7 @@ def loadWalletsAndSettings(self):
self.writeSetting('Load_Count', (self.settings.get('Load_Count')+1) % 100)
firstDate = self.getSettingOrSetDefault('First_Load_Date', RightNow())
daysSinceFirst = (RightNow() - firstDate) / (60*60*24)
-
+
# Set the usermode, default to standard
self.usermode = USERMODE.Standard
@@ -1667,14 +2279,26 @@ def loadWalletsAndSettings(self):
elif self.settings.get('User_Mode') == 'Expert':
self.usermode = USERMODE.Expert
+
+ # The user may have asked to never be notified of a particular
+ # notification again. We have a short-term list (wiped on every
+ # load), and a long-term list (saved in settings). We simply
+ # initialize the short-term list with the long-term list, and add
+ # short-term ignore requests to it
+ notifyStr = self.getSettingOrSetDefault('NotifyIgnore', '')
+ nsz = len(notifyStr)
+ self.notifyIgnoreLong = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
+ self.notifyIgnoreShort = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
+
+
# Load wallets found in the .armory directory
- wltPaths = self.settings.get('Other_Wallets', expectList=True)
+ wltPaths = []
self.walletMap = {}
- self.walletIndices = {}
+ self.walletIndices = {}
self.walletIDSet = set()
# I need some linear lists for accessing by index
- self.walletIDList = []
+ self.walletIDList = []
self.combinedLedger = []
self.ledgerSize = 0
self.ledgerTable = []
@@ -1688,7 +2312,7 @@ def loadWalletsAndSettings(self):
fullPath = os.path.join(ARMORY_HOME_DIR, f)
if os.path.isfile(fullPath) and not fullPath.endswith('backup.wallet'):
openfile = open(fullPath, 'rb')
- first8 = openfile.read(8)
+ first8 = openfile.read(8)
openfile.close()
if first8=='\xbaWALLET\x00':
wltPaths.append(fullPath)
@@ -1725,12 +2349,13 @@ def loadWalletsAndSettings(self):
# Maintain some linear lists of wallet info
self.walletIDSet.add(wltID)
self.walletIDList.append(wltID)
+ wltLoad.mainWnd = self
except:
LOGEXCEPT( '***WARNING: Wallet could not be loaded: %s (skipping)', fpath)
raise
-
-
+
+
LOGINFO('Number of wallets read in: %d', len(self.walletMap))
for wltID, wlt in self.walletMap.iteritems():
dispStr = (' Wallet (%s):' % wlt.uniqueIDB58).ljust(25)
@@ -1739,6 +2364,7 @@ def loadWalletsAndSettings(self):
LOGINFO(dispStr)
# Register all wallets with TheBDM
TheBDM.registerWallet( wlt.cppWallet )
+ TheBDM.bdm.registerWallet(wlt.cppWallet)
# Get the last directory
@@ -1748,7 +2374,6 @@ def loadWalletsAndSettings(self):
self.lastDirectory = savedDir
self.writeSetting('LastDirectory', savedDir)
- TimerStop('loadWltSettings')
#############################################################################
def getFileSave(self, title='Save Wallet File', \
@@ -1761,49 +2386,55 @@ def getFileSave(self, title='Save Wallet File', \
if not defaultFilename==None:
startPath = os.path.join(startPath, defaultFilename)
-
+
types = ffilter
types.append('All files (*)')
typesStr = ';; '.join(types)
# Found a bug with Swig+Threading+PyQt+OSX -- save/load file dialogs freeze
- # User picobit discovered this is avoided if you use the Qt dialogs, instead
+ # User picobit discovered this is avoided if you use the Qt dialogs, instead
# of the native OS dialogs. Use native for all except OSX...
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath, typesStr))
else:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath, typesStr,
options=QFileDialog.DontUseNativeDialog))
-
+
fdir,fname = os.path.split(fullPath)
if fdir:
self.writeSetting('LastDirectory', fdir)
return fullPath
-
+
#############################################################################
- def getFileLoad(self, title='Load Wallet File', ffilter=['Wallet files (*.wallet)']):
+ def getFileLoad(self, title='Load Wallet File', \
+ ffilter=['Wallet files (*.wallet)'], \
+ defaultDir=None):
+
LOGDEBUG('getFileLoad')
- lastDir = self.settings.get('LastDirectory')
- if len(lastDir)==0 or not os.path.exists(lastDir):
- lastDir = ARMORY_HOME_DIR
+
+ if defaultDir is None:
+ defaultDir = self.settings.get('LastDirectory')
+ if len(defaultDir)==0 or not os.path.exists(defaultDir):
+ defaultDir = ARMORY_HOME_DIR
+
types = list(ffilter)
types.append(tr('All files (*)'))
typesStr = ';; '.join(types)
# Found a bug with Swig+Threading+PyQt+OSX -- save/load file dialogs freeze
- # User picobit discovered this is avoided if you use the Qt dialogs, instead
+ # User picobit discovered this is avoided if you use the Qt dialogs, instead
# of the native OS dialogs. Use native for all except OSX...
if not OS_MACOSX:
- fullPath = unicode(QFileDialog.getOpenFileName(self, title, lastDir, typesStr))
+ fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir, typesStr))
else:
- fullPath = unicode(QFileDialog.getOpenFileName(self, title, lastDir, typesStr, \
+ fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir, typesStr, \
options=QFileDialog.DontUseNativeDialog))
self.writeSetting('LastDirectory', os.path.split(fullPath)[0])
return fullPath
-
+
##############################################################################
def getWltSetting(self, wltID, propName):
# Sometimes we need to settings specific to individual wallets -- we will
@@ -1827,8 +2458,8 @@ def toggleIsMine(self, wltID):
self.setWltSetting(wltID, 'IsMine', False)
else:
self.setWltSetting(wltID, 'IsMine', True)
-
-
+
+
#############################################################################
@@ -1852,7 +2483,7 @@ def writeSetting(self, settingName, val):
def startRescanBlockchain(self, forceFullScan=False):
if TheBDM.getBDMState() in ('Offline','Uninitialized'):
LOGWARN('Rescan requested but Armory is in offline mode')
- return
+ return
if TheBDM.getBDMState()=='Scanning':
LOGINFO('Queueing rescan after current scan completes.')
@@ -1868,6 +2499,8 @@ def startRescanBlockchain(self, forceFullScan=False):
#############################################################################
def forceRescanDB(self):
self.needUpdateAfterScan = True
+ self.lblDashModeBuild.setText( 'Build Databases', \
+ size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( 'Scanning Transaction History', \
size=4, bold=True, color='Foreground')
TheBDM.rescanBlockchain('ForceRescan', wait=False)
@@ -1876,8 +2509,10 @@ def forceRescanDB(self):
#############################################################################
def forceRebuildAndRescan(self):
self.needUpdateAfterScan = True
- self.lblDashModeScan.setText( 'Preparing Databases', \
+ self.lblDashModeBuild.setText( 'Preparing Databases', \
size=4, bold=True, color='Foreground')
+ self.lblDashModeScan.setText( 'Scan Transaction History', \
+ size=4, bold=True, color='DisableFG')
#self.resetBdmBeforeScan() # this resets BDM and then re-registeres wlts
TheBDM.rescanBlockchain('ForceRebuild', wait=False)
self.setDashboardDetails()
@@ -1887,50 +2522,83 @@ def forceRebuildAndRescan(self):
#############################################################################
+ @TimeThisFunction
+ def initialWalletSync(self):
+ for wltID in self.walletMap.iterkeys():
+ LOGINFO('Syncing wallet: %s', wltID)
+ self.walletMap[wltID].setBlockchainSyncFlag(BLOCKCHAIN_READONLY)
+ # Used to do "sync-lite" when we had to rescan for new addresses,
+ self.walletMap[wltID].syncWithBlockchainLite(0)
+ #self.walletMap[wltID].syncWithBlockchain(0)
+ self.walletMap[wltID].detectHighestUsedIndex(True) # expand wlt if necessary
+ self.walletMap[wltID].fillAddressPool()
+
+ @TimeThisFunction
def finishLoadBlockchain(self):
-
- TimerStart('finishLoadBlockchain')
# Now that the blockchain is loaded, let's populate the wallet info
if TheBDM.isInitialized():
+
+ #for wltID in self.walletMap.iterkeys():
+ # TheBDM.bdm.unregisterWallet(self.walletMap[wltID].cppWallet)
self.currBlockNum = TheBDM.getTopBlockHeight()
self.setDashboardDetails()
if not self.memPoolInit:
- mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
- clearpoolfile = os.path.join(ARMORY_HOME_DIR,'clearmempool.txt')
+ mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
+ clearpoolfile = os.path.join(ARMORY_HOME_DIR,'clearmempool.flag')
if os.path.exists(clearpoolfile):
- LOGINFO('clearmempool.txt found. Clearing memory pool')
+ LOGINFO('clearmempool.flag found. Clearing memory pool')
os.remove(clearpoolfile)
if os.path.exists(mempoolfile):
os.remove(mempoolfile)
- else:
+ else:
self.checkMemoryPoolCorruption(mempoolfile)
TheBDM.enableZeroConf(mempoolfile)
self.memPoolInit = True
- TimerStart('initialWalletSync')
for wltID in self.walletMap.iterkeys():
LOGINFO('Syncing wallet: %s', wltID)
self.walletMap[wltID].setBlockchainSyncFlag(BLOCKCHAIN_READONLY)
self.walletMap[wltID].syncWithBlockchainLite(0)
self.walletMap[wltID].detectHighestUsedIndex(True) # expand wlt if necessary
self.walletMap[wltID].fillAddressPool()
- TimerStop('initialWalletSync')
-
self.createCombinedLedger()
self.ledgerSize = len(self.combinedLedger)
- self.statusBar().showMessage('Blockchain loaded, wallets sync\'d!', 10000)
+ self.statusBar().showMessage('Blockchain loaded, wallets sync\'d!', 10000)
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', self.currBlockNum)
self.lblArmoryStatus.setText(\
- 'Connected (%s blocks) ' %
+ 'Connected (%s blocks) ' %
(htmlColor('TextGreen'), self.currBlockNum))
- self.blkReceived = self.getSettingOrSetDefault('LastBlkRecvTime', 0)
+
+ self.blkReceived = TheBDM.getTopBlockHeader().getTimestamp()
+ self.writeSetting('LastBlkRecv', self.currBlockNum)
+ self.writeSetting('LastBlkRecvTime', self.blkReceived)
currSyncSuccess = self.getSettingOrSetDefault("SyncSuccessCount", 0)
self.writeSetting('SyncSuccessCount', min(currSyncSuccess+1, 10))
+
+ vectMissingBlks = TheBDM.missingBlockHashes()
+ LOGINFO('Blockfile corruption check: Missing blocks: %d', len(vectMissingBlks))
+ if len(vectMissingBlks) > 0:
+ LOGINFO('Missing blocks: %d', len(vectMissingBlks))
+ QMessageBox.critical(self, tr('Blockdata Error'), tr("""
+ Armory has detected an error in the blockchain database
+ maintained by the third-party Bitcoin software (Bitcoin-Qt
+ or bitcoind). This error is not fatal, but may lead to
+ incorrect balances, inability to send coins, or application
+ instability.
+
+ It is unlikely that the error affects your wallets,
+ but it is possible. If you experience crashing,
+ or see incorrect balances on any wallets, it is strongly
+ recommended you re-download the blockchain using:
+ "Help"\xe2\x86\x92"Factory Reset"."""), \
+ QMessageBox.Ok)
+
+
if self.getSettingOrSetDefault('NotifyBlkFinish',True):
reply,remember = MsgBoxWithDNAA(MSGBOX.Info, \
'Blockchain Loaded!', 'Blockchain loading is complete. '
@@ -1938,29 +2606,43 @@ def finishLoadBlockchain(self):
'under the "Transactions" tab. You can also send and '
'receive bitcoins.', \
dnaaMsg='Do not show me this notification again ', yesStr='OK')
-
+
if remember==True:
self.writeSetting('NotifyBlkFinish',False)
- else:
- self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Transactions)
-
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
+
+
self.netMode = NETWORKMODE.Full
self.settings.set('FailedLoadCount', 0)
else:
self.statusBar().showMessage('! Blockchain loading failed !', 10000)
-
-
+
+
# This will force the table to refresh with new data
self.setDashboardDetails()
+ self.updateAnnounceTab() # make sure satoshi version info is up to date
+ self.removeBootstrapDat() # if we got here, we're *really* done with it
self.walletModel.reset()
-
- TimerStop('finishLoadBlockchain')
+
+ qLen = self.delayedURIData['qLen']
+ if qLen > 0:
+ #delayed URI parses, feed them back to the uri parser now
+ for i in range(0, qLen):
+ uriStr = self.delayedURIData[qLen-i-1]
+ self.delayedURIData['qLen'] = qLen -i -1
+ self.uriLinkClicked(uriStr)
+ #############################################################################
+ def removeBootstrapDat(self):
+ bfile = os.path.join(BTC_HOME_DIR, 'bootstrap.dat.old')
+ if os.path.exists(bfile):
+ os.remove(bfile)
+
#############################################################################
def checkMemoryPoolCorruption(self, mempoolname):
- if not os.path.exists(mempoolname):
+ if not os.path.exists(mempoolname):
return
memfile = open(mempoolname, 'rb')
@@ -1975,15 +2657,13 @@ def checkMemoryPoolCorruption(self, mempoolname):
except:
os.remove(mempoolname);
LOGWARN('Memory pool file was corrupt. Deleted. (no further action is needed)')
-
-
#############################################################################
def changeLedgerSorting(self, col, order):
"""
The direct sorting was implemented to avoid having to search for comment
information for every ledger entry. Therefore, you can't sort by comments
- without getting them first, which is the original problem to avoid.
+ without getting them first, which is the original problem to avoid.
"""
if col in (LEDGERCOLS.NumConf, LEDGERCOLS.DateStr, \
LEDGERCOLS.Comment, LEDGERCOLS.Amount, LEDGERCOLS.WltName):
@@ -1991,16 +2671,13 @@ def changeLedgerSorting(self, col, order):
self.sortLedgOrder = order
self.createCombinedLedger()
-
#############################################################################
+ @TimeThisFunction
def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
"""
Create a ledger to display on the main screen, that consists of ledger
entries of any SUBSET of available wallets.
"""
-
- TimerStart('createCombinedLedger')
-
start = RightNow()
if wltIDList==None:
# Create a list of [wltID, type] pairs
@@ -2017,7 +2694,7 @@ def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
listWatching = [t[0] for t in filter(lambda x: x[1]==WLTTYPES.WatchOnly, typelist)]
listCrypt = [t[0] for t in filter(lambda x: x[1]==WLTTYPES.Crypt, typelist)]
listPlain = [t[0] for t in filter(lambda x: x[1]==WLTTYPES.Plain, typelist)]
-
+
if currIdx==0:
wltIDList = listOffline + listCrypt + listPlain
elif currIdx==1:
@@ -2028,12 +2705,11 @@ def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
wltIDList = self.walletIDList
else:
pass
- #raise WalletExistsError, 'Bad combo-box selection: ' + str(currIdx)
+ #raise WalletExistsError('Bad combo-box selection: ' + str(currIdx))
self.writeSetting('LastFilterState', currIdx)
-
+
if wltIDList==None:
- TimerStop('createCombinedLedger')
return
self.combinedLedger = []
@@ -2073,7 +2749,7 @@ def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
self.lblLedgRange.setText('%d to %d' % (self.currLedgMin, self.currLedgMax))
self.lblLedgTotal.setText('(of %d)' % self.ledgerSize)
- # Many MainWindow objects haven't been created yet...
+ # Many MainWindow objects haven't been created yet...
# let's try to update them and fail silently if they don't exist
try:
if TheBDM.getBDMState() in ('Offline', 'Scanning'):
@@ -2081,7 +2757,7 @@ def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
self.lblSpendFunds.setText( '-'*12 )
self.lblUnconfFunds.setText('-'*12 )
return
-
+
uncolor = htmlColor('MoneyNeg') if unconfFunds>0 else htmlColor('Foreground')
btccolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('MoneyPos')
lblcolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('Foreground')
@@ -2101,20 +2777,13 @@ def createCombinedLedger(self, wltIDList=None, withZeroConf=True):
except AttributeError:
raise
- finally:
- TimerStop('createCombinedLedger')
-
-
-
#############################################################################
+ @TimeThisFunction
def convertLedgerToTable(self, ledger):
-
- TimerStart('convertLedgerTbl')
-
table2D = []
datefmt = self.getPreferredDateFormat()
- for wltID,le in ledger:
+ for wltID,le in ledger:
row = []
wlt = self.walletMap[wltID]
@@ -2129,75 +2798,56 @@ def convertLedgerToTable(self, ledger):
#amt += self.getFeeForTx(le.getTxHash())
# If this was sent-to-self... we should display the actual specified
- # value when the transaction was executed. This is pretty difficult
+ # value when the transaction was executed. This is pretty difficult
# when both "recipient" and "change" are indistinguishable... but
# They're actually not because we ALWAYS generate a new address to
- # for change , which means the change address MUST have a higher
+ # for change , which means the change address MUST have a higher
# chain index
if le.isSentToSelf():
amt = determineSentToSelfAmt(le, wlt)[0]
-
-
if le.getBlockNum() >= 0xffffffff: nConf = 0
# NumConf
row.append(nConf)
-
# UnixTime (needed for sorting)
row.append(le.getTxTime())
-
# Date
row.append(unixTimeToFormatStr(le.getTxTime(), datefmt))
-
# TxDir (actually just the amt... use the sign of the amt to determine dir)
row.append(coin2str(le.getValue(), maxZeros=2))
-
# Wlt Name
row.append(self.walletMap[wltID].labelName)
-
# Comment
row.append(self.getCommentForLE(wltID, le))
-
# Amount
row.append(coin2str(amt, maxZeros=2))
-
# Is this money mine?
row.append( determineWalletType(wlt, self)[0]==WLTTYPES.WatchOnly)
-
# WltID
row.append( wltID )
-
# TxHash
row.append( binary_to_hex(le.getTxHash() ))
-
# Is this a coinbase/generation transaction
row.append( le.isCoinbase() )
-
# Sent-to-self
row.append( le.isSentToSelf() )
-
# Tx was invalidated! (double=spend!)
row.append( not le.isValid())
-
# Finally, attach the row to the table
table2D.append(row)
-
- TimerStop('convertLedgerTbl')
-
return table2D
-
+
#############################################################################
+ @TimeThisFunction
def walletListChanged(self):
- TimerStart('wltListChanged')
self.walletModel.reset()
self.populateLedgerComboBox()
self.createCombinedLedger()
- TimerStop('wltListChanged')
#############################################################################
+ @TimeThisFunction
def populateLedgerComboBox(self):
- TimerStart('populateLedgerCombo')
self.comboWltSelect.clear()
self.comboWltSelect.addItem( 'My Wallets' )
self.comboWltSelect.addItem( 'Offline Wallets' )
@@ -2209,8 +2859,6 @@ def populateLedgerComboBox(self):
self.comboWltSelect.insertSeparator(4)
comboIdx = self.getSettingOrSetDefault('LastFilterState', 0)
self.comboWltSelect.setCurrentIndex(comboIdx)
- TimerStop('populateLedgerCombo')
-
#############################################################################
def execDlgWalletDetails(self, index=None):
@@ -2219,7 +2867,7 @@ def execDlgWalletDetails(self, index=None):
'You currently do not have any wallets. Would you like to '
'create one, now?', QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- self.createNewWallet(initLabel='Primary Wallet')
+ self.startWalletWizard()
return
if index==None:
@@ -2233,14 +2881,14 @@ def execDlgWalletDetails(self, index=None):
QMessageBox.Ok)
return
index = index[0]
-
+
wlt = self.walletMap[self.walletIDList[index.row()]]
dialog = DlgWalletDetails(wlt, self.usermode, self, self)
dialog.exec_()
#self.walletListChanged()
-
-
-
+
+
+
#############################################################################
def updateTxCommentFromView(self, view):
index = view.selectedIndexes()[0]
@@ -2266,34 +2914,35 @@ def updateAddressCommentFromView(self, view, wlt):
dialog = DlgSetComment(currComment, 'Address', self, self)
if dialog.exec_():
newComment = str(dialog.edtComment.text())
- addr160 = addrStr_to_hash160(addrStr)
+ atype, addr160 = addrStr_to_hash160(addrStr)
+ if atype==P2SHBYTE:
+ LOGWARN('Setting comment for P2SH address: %s' % addrStr)
wlt.setComment(addr160, newComment)
#############################################################################
+ @TimeThisFunction
def getAddrCommentIfAvailAll(self, txHash):
- TimerStart('getAddrCommentIfAvail')
if not TheBDM.isInitialized():
- TimerStop('getAddrCommentIfAvail')
return ''
else:
-
+
appendedComments = []
for wltID,wlt in self.walletMap.iteritems():
cmt = wlt.getAddrCommentIfAvail(txHash)
if len(cmt)>0:
appendedComments.append(cmt)
-
+
return '; '.join(appendedComments)
-
+
#############################################################################
def getCommentForLE(self, wltID, le):
- # Smart comments for LedgerEntry objects: get any direct comments ...
+ # Smart comments for LedgerEntry objects: get any direct comments ...
# if none, then grab the one for any associated addresses.
-
+
return self.walletMap[wltID].getCommentForLE(le)
"""
txHash = le.getTxHash()
@@ -2319,7 +2968,7 @@ def addWalletToApplication(self, newWallet, walletIsNew=True):
if self.walletMap.has_key(newWltID):
return
-
+
self.walletMap[newWltID] = newWallet
self.walletIndices[newWltID] = len(self.walletMap)-1
@@ -2330,8 +2979,9 @@ def addWalletToApplication(self, newWallet, walletIsNew=True):
ledger = []
wlt = self.walletMap[newWltID]
self.walletListChanged()
+ self.mainWnd = self
+
-
#############################################################################
def removeWalletFromApplication(self, wltID):
LOGINFO('removeWalletFromApplication')
@@ -2353,78 +3003,9 @@ def removeWalletFromApplication(self, wltID):
self.walletListChanged()
-
#############################################################################
- def createNewWallet(self, initLabel=''):
- LOGINFO('createNewWallet')
- dlg = DlgNewWallet(self, self, initLabel=initLabel)
- if dlg.exec_():
-
- if dlg.selectedImport:
- self.execImportWallet()
- return
-
- name = str(dlg.edtName.text())
- descr = str(dlg.edtDescr.toPlainText())
- kdfSec = dlg.kdfSec
- kdfBytes = dlg.kdfBytes
-
- # If this will be encrypted, we'll need to get their passphrase
- passwd = []
- if dlg.chkUseCrypto.isChecked():
- dlgPasswd = DlgChangePassphrase(self, self)
- if dlgPasswd.exec_():
- passwd = SecureBinaryData(str(dlgPasswd.edtPasswd1.text()))
- else:
- return # no passphrase == abort new wallet
- else:
- return False
-
- newWallet = None
- if passwd:
- newWallet = PyBtcWallet().createNewWallet( \
- withEncrypt=True, \
- securePassphrase=passwd, \
- kdfTargSec=kdfSec, \
- kdfMaxMem=kdfBytes, \
- shortLabel=name, \
- longLabel=descr, \
- doRegisterWithBDM=False)
- else:
- newWallet = PyBtcWallet().createNewWallet( \
- withEncrypt=False, \
- shortLabel=name, \
- longLabel=descr, \
- doRegisterWithBDM=False)
-
-
- # And we must unlock it before the first fillAddressPool call
- if newWallet.useEncryption:
- newWallet.unlock(securePassphrase=passwd)
-
- # We always want to fill the address pool, right away.
- fillpool = lambda: newWallet.fillAddressPool(doRegister=False)
- DlgExecLongProcess(fillpool, 'Creating Wallet...', self, self).exec_()
-
- # Reopening from file helps make sure everything is correct -- don't
- # let the user use a wallet that triggers errors on reading it
- wltpath = newWallet.walletPath
- newWallet = None
- newWallet = PyBtcWallet().readWalletFile(wltpath)
-
-
- self.addWalletToApplication(newWallet, walletIsNew=True)
-
- if TheBDM.getBDMState() in ('Uninitialized', 'Offline'):
- TheBDM.registerWallet(newWallet, isFresh=True, wait=False)
- else:
- self.newWalletList.append([newWallet, True])
-
- # Prompt user to print paper backup if they requested it.
- if dlg.chkPrintPaper.isChecked():
- OpenPaperBackupWindow('Single', self, self, newWallet, \
- tr("Create Paper Backup"))
-
+ def RecoverWallet(self):
+ DlgWltRecoverWallet(self, self).promptWalletRecovery()
#############################################################################
@@ -2432,7 +3013,7 @@ def createSweepAddrTx(self, a160ToSweepList, sweepTo160, forceZeroFee=False):
"""
This method takes a list of addresses (likely just created from private
key data), finds all their unspent TxOuts, and creates a signed tx that
- transfers 100% of the funds to the sweepTO160 address. It doesn't
+ transfers 100% of the funds to the sweepTO160 address. It doesn't
actually execute the transaction, but it will return a broadcast-ready
PyTx object that the user can confirm. TxFee is automatically calc'd
and deducted from the output value, if necessary.
@@ -2446,18 +3027,19 @@ def createSweepAddrTx(self, a160ToSweepList, sweepTo160, forceZeroFee=False):
utxoList = getUnspentTxOutsForAddr160List(addr160List, 'Sweep', 0)
if len(utxoList)==0:
return [None, 0, 0]
-
+
outValue = sumTxOutList(utxoList)
inputSide = []
for utxo in utxoList:
# The PyCreateAndSignTx method require PyTx and PyBtcAddress objects
- CppPrevTx = TheBDM.getTxByHash(utxo.getTxHash())
+ CppPrevTx = TheBDM.getTxByHash(utxo.getTxHash())
PyPrevTx = PyTx().unserialize(CppPrevTx.serialize())
addr160 = CheckHash160(utxo.getRecipientScrAddr())
inputSide.append([getAddr(addr160), PyPrevTx, utxo.getTxOutIndex()])
- minFee = calcMinSuggestedFees(utxoList, outValue, 0)[1]
+ # Try with zero fee and exactly one output
+ minFee = calcMinSuggestedFees(utxoList, outValue, 0, 1)[1]
if minFee > 0 and \
not forceZeroFee and \
@@ -2469,13 +3051,14 @@ def createSweepAddrTx(self, a160ToSweepList, sweepTo160, forceZeroFee=False):
return [None, outValue, minFee]
outputSide = []
- outputSide.append( [PyBtcAddress().createFromPublicKeyHash160(sweepTo160), outValue] )
+ outputSide.append( [PyBtcAddress().createFromPublicKeyHash160(sweepTo160), \
+ outValue] )
pytx = PyCreateAndSignTx(inputSide, outputSide)
return (pytx, outValue, minFee)
-
+
#############################################################################
@@ -2529,7 +3112,7 @@ def confirmSweepScan(self, pybtcaddrList, targAddr160):
elif TheBDM.getBDMState()=='BlockchainReady':
msgConfirm += ( \
'Would you like to start the scan operation right now?')
-
+
msgConfirm += ('
Clicking "No" will abort the sweep operation')
confirmed = QMessageBox.question(self, 'Confirm Rescan', msgConfirm, \
@@ -2550,12 +3133,12 @@ def confirmSweepScan(self, pybtcaddrList, targAddr160):
def finishSweepScan(self):
LOGINFO('finishSweepScan')
sweepList, self.sweepAfterScanList = self.sweepAfterScanList,[]
-
+
#######################################################################
# The createSweepTx method will return instantly because the blockchain
# has already been rescanned, as described above
finishedTx, outVal, fee = self.createSweepAddrTx(sweepList, self.sweepAfterScanTarg)
-
+
gt1 = len(sweepList)>1
if finishedTx==None:
@@ -2583,13 +3166,13 @@ def finishSweepScan(self):
wltID = self.getWalletForAddr160(self.sweepAfterScanTarg)
wlt = self.walletMap[wltID]
-
+
# Finally, if we got here, we're ready to broadcast!
if gt1:
dispIn = ''
else:
dispIn = 'address %s' % sweepList[0].getAddrStr()
-
+
dispOut = 'wallet "%s" (%s) ' % (wlt.labelName, wlt.uniqueIDB58)
if DlgVerifySweep(dispIn, dispOut, outVal, fee).exec_():
self.broadcastTransaction(finishedTx, dryRun=False)
@@ -2624,8 +3207,8 @@ def broadcastTransaction(self, pytx, dryRun=False):
LOGINFO('Sending Tx, %s', binary_to_hex(newTxHash))
self.NetworkingFactory.sendTx(pytx)
LOGINFO('Transaction sent to Satoshi client...!')
-
-
+
+
def sendGetDataMsg():
msg = PyMessage('getdata')
msg.payload.invList.append( [MSG_INV_TX, newTxHash] )
@@ -2666,12 +3249,12 @@ def checkForTxInBDM():
'
If the transaction did fail, please consider '
'reporting this error the the Armory '
'developers. From the main window, go to '
- '"File"-->"Export Log File" to make a copy of your '
+ '"File"\xe2\x86\x92"Export Log File" to make a copy of your '
'log file to send via email to support@bitcoinarmory.com. ' \
% (searchstr,searchstr[:8]), \
QMessageBox.Ok)
-
- self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Transactions)
+
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
reactor.callLater(4, sendGetDataMsg)
reactor.callLater(5, checkForTxInBDM)
@@ -2686,8 +3269,8 @@ def checkForTxInBDM():
#'received. Both issues are a problem with Armory that will be fixed '
#'with the next release.', QMessageBox.Ok)
-
-
+
+
#############################################################################
def warnNoImportWhileScan(self):
extraMsg = ''
@@ -2700,14 +3283,16 @@ def warnNoImportWhileScan(self):
'Wallets and addresses cannot be imported while Armory is in '
'the middle of an existing blockchain scan. Please wait for '
'the scan to finish. ' + extraMsg, QMessageBox.Ok)
-
-
-
+
+
+
#############################################################################
def execImportWallet(self):
sdm = TheSDM.getSDMState()
bdm = TheBDM.getBDMState()
- if sdm in ['BitcoindInitializing','BitcoindSynchronizing'] or \
+ if sdm in ['BitcoindInitializing', \
+ 'BitcoindSynchronizing', \
+ 'TorrentSynchronizing'] or \
bdm in ['Scanning']:
QMessageBox.warning(self, tr('Scanning'), tr("""
Armory is currently in the middle of scanning the blockchain for
@@ -2744,14 +3329,14 @@ def execGetImportWltName(self):
shutil.copy(fn, newpath)
newWlt = PyBtcWallet().readWalletFile(newpath)
newWlt.fillAddressPool()
-
+
self.addWalletToAppAndAskAboutRescan(newWlt)
""" I think the addWalletToAppAndAskAboutRescan replaces this...
if TheBDM.getBDMState() in ('Uninitialized', 'Offline'):
self.addWalletToApplication(newWlt, walletIsNew=False)
return
-
+
if TheBDM.getBDMState()=='BlockchainReady':
doRescanNow = QMessageBox.question(self, 'Rescan Needed', \
'The wallet was imported successfully, but cannot be displayed '
@@ -2769,13 +3354,13 @@ def execGetImportWltName(self):
'The wallet was imported successfully, but its balance cannot '
'be determined until Armory performs a "recovery scan" for the '
'wallet. This scan potentially takes much longer than a regular '
- 'scan, and must be completed for all imported wallets. '
+ 'scan, and must be completed for all imported wallets. '
'
'
'Armory is already in the middle of a scan and cannot be interrupted. '
'Would you like to start the recovery scan when it is done?'
'
'
' If you click "No," the wallet import will be aborted '
- 'and you must re-import the wallet when you '
+ 'and you must re-import the wallet when you '
'are able to wait for the recovery scan.', \
QMessageBox.Yes | QMessageBox.No)
@@ -2790,7 +3375,7 @@ def execGetImportWltName(self):
QMessageBox.warning(self, 'Import Failed', \
'The wallet was not imported.', QMessageBox.Ok)
- # The wallet cannot exist without also being on disk.
+ # The wallet cannot exist without also being on disk.
# If the user aborted, we should remove the disk data.
thepath = newWlt.getWalletPath()
thepathBackup = newWlt.getWalletPath('backup')
@@ -2809,14 +3394,14 @@ def execGetImportWltName(self):
#############################################################################
def addWalletToAppAndAskAboutRescan(self, newWallet):
LOGINFO('Raw import successful.')
-
- # If we are offline, then we can't assume there will ever be a
+
+ # If we are offline, then we can't assume there will ever be a
# rescan. Just add the wallet to the application
if TheBDM.getBDMState() in ('Uninitialized', 'Offline'):
TheBDM.registerWallet(newWallet.cppWallet)
self.addWalletToApplication(newWallet, walletIsNew=False)
return
-
+
""" TODO: Temporarily removed recovery-rescan operations
elif TheBDM.getBDMState()=='BlockchainReady':
doRescanNow = QMessageBox.question(self, 'Rescan Needed', \
@@ -2847,28 +3432,28 @@ def addWalletToAppAndAskAboutRescan(self, newWallet):
if TheBDM.getBDMState()=='BlockchainReady':
doRescanNow = QMessageBox.question(self, tr('Rescan Needed'), \
- tr("""The wallet was restored successfully but its balance
- cannot be displayed until the blockchain is rescanned.
- Armory will need to go into offline mode for 5-20 minutes.
+ tr("""The wallet was restored successfully but its balance
+ cannot be displayed until the blockchain is rescanned.
+ Armory will need to go into offline mode for 5-20 minutes.
- Would you like to do the scan now? Clicking "No" will
+ Would you like to do the scan now? Clicking "No" will
abort the restore/import operation."""), \
QMessageBox.Yes | QMessageBox.No)
else:
doRescanNow = QMessageBox.question(self, tr('Rescan Needed'), \
- tr("""The wallet was restored successfully but its balance
- cannot be displayed until the blockchain is rescanned.
- However, Armory is currently in the middle of a rescan
+ tr("""The wallet was restored successfully but its balance
+ cannot be displayed until the blockchain is rescanned.
+ However, Armory is currently in the middle of a rescan
operation right now. Would you like to start a new scan
as soon as this one is finished?
Clicking "No" will abort adding the wallet to Armory."""), \
QMessageBox.Yes | QMessageBox.No)
-
+
if doRescanNow == QMessageBox.Yes:
LOGINFO('User requested rescan after wallet restore')
- #TheBDM.startWalletRecoveryScan(newWallet)
+ #TheBDM.startWalletRecoveryScan(newWallet)
TheBDM.registerWallet(newWallet.cppWallet)
self.startRescanBlockchain()
self.setDashboardDetails()
@@ -2878,7 +3463,7 @@ def addWalletToAppAndAskAboutRescan(self, newWallet):
'The wallet was not restored. To restore the wallet, reenter '
'the "Restore Wallet" dialog again when you are able to wait '
'for the rescan operation. ', QMessageBox.Ok)
- # The wallet cannot exist without also being on disk.
+ # The wallet cannot exist without also being on disk.
# If the user aborted, we should remove the disk data.
thepath = newWallet.getWalletPath()
thepathBackup = newWallet.getWalletPath('backup')
@@ -2893,38 +3478,12 @@ def addWalletToAppAndAskAboutRescan(self, newWallet):
#############################################################################
def digitalBackupWarning(self):
reply = QMessageBox.warning(self, 'Be Careful!', tr("""
- WARNING: You are about to make an
+ WARNING: You are about to make an
unencrypted backup of your wallet. It is highly recommended
that you do not ever save unencrypted wallets to your regular
hard drive. This feature is intended for saving to a USB key or
other removable media."""), QMessageBox.Ok | QMessageBox.Cancel)
return (reply==QMessageBox.Ok)
-
-
-
- #############################################################################
- def execMigrateSatoshi(self):
- reply = MsgBoxCustom(MSGBOX.Question, 'Wallet Version Warning', \
- 'This wallet migration tool only works with regular Bitcoin wallets '
- 'produced using version 0.5.X and earlier. '
- 'You can determine the version by '
- 'opening the regular Bitcoin client, then choosing "Help"'
- '-->"About Bitcoin-Qt" from the main menu. '
- '
'
- 'If you have used your wallet with any version of the regular '
- 'Bitcoin client 0.6.0 or higher, this tool will fail. '
- 'In fact, it is highly recommended that you do not even attempt '
- 'to use the tool on such wallets until it is officially supported '
- 'by Armory.'
- '
'
- 'Has your wallet ever been opened in the 0.6.0+ Bitcoin-Qt client?', \
- yesStr='Yes, Abort!', noStr='No, Carry On!')
-
- if reply:
- return
-
- DlgMigrateSatoshiWallet(self, self).exec_()
-
#############################################################################
@@ -2961,14 +3520,14 @@ def getUniqueWalletFilename(self, wltPath):
fname='%s_%02d.wallet'%(base, newIndex)
newIndex+=1
if newIndex==99:
- raise WalletExistsError, ('Cannot find unique filename for wallet.'
- 'Too many duplicates!')
+ raise WalletExistsError('Cannot find unique filename for wallet.'
+ 'Too many duplicates!')
return fname
-
+
#############################################################################
def addrViewDblClicked(self, index, wlt):
- uacfv = lambda x: self.main.updateAddressCommentFromView(self.wltAddrView, self.wlt)
+ uacfv = lambda x: self.updateAddressCommentFromView(self.wltAddrView, self.wlt)
#############################################################################
@@ -3005,10 +3564,10 @@ def showLedgerTx(self):
#############################################################################
def showContextMenuLedger(self):
menu = QMenu(self.ledgerView)
-
+
if len(self.ledgerView.selectedIndexes())==0:
return
-
+
actViewTx = menu.addAction("View Details")
actViewBlkChn = menu.addAction("View on www.blockchain.info")
actComment = menu.addAction("Change Comment")
@@ -3028,7 +3587,7 @@ def showContextMenuLedger(self):
elif action==actViewBlkChn:
try:
webbrowser.open(blkchnURL)
- except:
+ except:
LOGEXCEPT('Failed to open webbrowser')
QMessageBox.critical(self, 'Could not open browser', \
'Armory encountered an error opening your web browser. To view '
@@ -3042,11 +3601,21 @@ def showContextMenuLedger(self):
elif action==actComment:
self.updateTxCommentFromView(self.ledgerView)
elif action==actOpenWallet:
- DlgWalletDetails(self.walletMap[wltID], self.usermode, self, self).exec_()
+ DlgWalletDetails(self.getSelectedWallet(), self.usermode, self, self).exec_()
+ #############################################################################
+ def getSelectedWallet(self):
+ wltID = None
+ if len(self.walletMap) > 0:
+ wltID = self.walletMap.keys()[0]
+ wltSelect = self.walletsView.selectedIndexes()
+ if len(wltSelect) > 0:
+ row = wltSelect[0].row()
+ wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
+ # Starting the send dialog with or without a wallet
+ return None if wltID == None else self.walletMap[wltID]
- #############################################################################
def clickSendBitcoins(self):
if TheBDM.getBDMState() in ('Offline', 'Uninitialized'):
QMessageBox.warning(self, 'Offline Mode', \
@@ -3067,7 +3636,6 @@ def clickSendBitcoins(self):
QMessageBox.Ok)
return
- wltID = None
selectionMade = True
if len(self.walletMap)==0:
reply = QMessageBox.information(self, 'No Wallets!', \
@@ -3075,48 +3643,31 @@ def clickSendBitcoins(self):
'receive some coins. Would you like to create a wallet?', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- self.createNewWallet(initLabel='Primary Wallet')
- return
- elif len(self.walletMap)==1:
- wltID = self.walletMap.keys()[0]
+ self.startWalletWizard()
else:
- wltSelect = self.walletsView.selectedIndexes()
- if len(wltSelect)>0:
- row = wltSelect[0].row()
- wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
- dlg = DlgWalletSelect(self, self, 'Send from Wallet...', firstSelect=wltID, onlyMyWallets=False)
- if dlg.exec_():
- wltID = dlg.selectedID
- else:
- selectionMade = False
+ DlgSendBitcoins(self.getSelectedWallet(), self, self).exec_()
- if selectionMade:
- wlt = self.walletMap[wltID]
- wlttype = determineWalletType(wlt, self)[0]
- dlgSend = DlgSendBitcoins(wlt, self, self)
- dlgSend.exec_()
-
#############################################################################
def uriSendBitcoins(self, uriDict):
# Because Bitcoin-Qt doesn't store the message= field we have to assume
- # that the label field holds the Tx-info. So we concatenate them for
+ # that the label field holds the Tx-info. So we concatenate them for
# the display message
uri_has = lambda s: uriDict.has_key(s)
haveLbl = uri_has('label')
haveMsg = uri_has('message')
- newMsg = ''
+ newMsg = ''
if haveLbl and haveMsg:
newMsg = uriDict['label'] + ': ' + uriDict['message']
elif not haveLbl and haveMsg:
newMsg = uriDict['message']
elif haveLbl and not haveMsg:
newMsg = uriDict['label']
-
+
descrStr = ''
- descrStr = ('You just clicked on a "bitcoin:" link requesting bitcoins '
+ descrStr = ('You just clicked on a "bitcoin:" link requesting bitcoins '
'to be sent to the following address: ')
descrStr += ' --Address:\t%s ' % uriDict['address']
@@ -3141,7 +3692,7 @@ def uriSendBitcoins(self, uriDict):
descrStr += ' --Message:\t%s' % newMsg
uriDict['message'] = newMsg
-
+
if not uri_has('amount'):
descrStr += ('
There is no amount specified in the link, so '
'you can decide the amount after selecting a wallet to use '
@@ -3158,22 +3709,12 @@ def uriSendBitcoins(self, uriDict):
'currently have no wallets! Would you like to create a wallet '
'now?', QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- self.createNewWallet(initLabel='Primary Wallet')
+ self.startWalletWizard()
return False
- elif len(self.walletMap)>1:
- dlg = DlgWalletSelect(self, self, 'Send from Wallet...', descrStr, \
- onlyMyWallets=True, atLeast=amt)
- if not dlg.exec_():
- return False
- selectedWalletID = dlg.selectedID
else:
- selectedWalletID = self.walletIDList[0]
-
- wlt = self.walletMap[selectedWalletID]
- dlgSend = DlgSendBitcoins(wlt, self, self, uriDict)
- dlgSend.exec_()
+ DlgSendBitcoins(self.getSelectedWallet(), self, self, uriDict).exec_()
return True
-
+
#############################################################################
def clickReceiveCoins(self):
@@ -3186,7 +3727,7 @@ def clickReceiveCoins(self):
'store you bitcoins! Would you like to create a wallet now?', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
- self.createNewWallet(initLabel='Primary Wallet')
+ self.startWalletWizard()
return
elif len(self.walletMap)==1:
wltID = self.walletMap.keys()[0]
@@ -3195,9 +3736,10 @@ def clickReceiveCoins(self):
if len(wltSelect)>0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
- dlg = DlgWalletSelect(self, self, 'Receive coins with wallet...', '', firstSelect=wltID, onlyMyWallets=False)
+ dlg = DlgWalletSelect(self, self, 'Receive coins with wallet...', '', \
+ firstSelect=wltID, onlyMyWallets=False)
if dlg.exec_():
- wltID = dlg.selectedID
+ wltID = dlg.selectedID
else:
selectionMade = False
@@ -3214,7 +3756,7 @@ def sysTrayActivated(self, reason):
if reason==QSystemTrayIcon.DoubleClick:
self.bringArmoryToFront()
-
+
#############################################################################
def bringArmoryToFront(self):
@@ -3229,78 +3771,160 @@ def minimizeArmory(self):
self.hide()
self.sysTray.show()
+ #############################################################################
+ def startWalletWizard(self):
+ walletWizard = WalletWizard(self, self)
+ walletWizard.exec_()
+
+ #############################################################################
+ def startTxWizard(self, prefill=None, onlyOfflineWallets=False):
+ txWizard = TxWizard(self, self, self.getSelectedWallet(), prefill, onlyOfflineWallets=onlyOfflineWallets)
+ txWizard.exec_()
+
#############################################################################
def exportLogFile(self):
LOGDEBUG('exportLogFile')
- extraStr = ''
- if self.usermode in (USERMODE.Advanced, USERMODE.Expert):
- extraStr = tr( """
-
Advanced tip: This log file is maintained at
- the following location on your hard drive:
-
%s
- Before sending the log file, you may edit it to remove information that
- does not seem relevant for debugging purposes. Or, extract the error
- messages from the log file and copy only those into a bug report email """) % \
- ARMORY_LOG_FILE
-
- #reply = QMessageBox.warning(self, 'Export Log File', \
- reply = MsgBoxCustom(MSGBOX.Warning, 'Privacy Warning', tr("""
- The log file contains information that may be considered sensitive
- by some users. Log files should be protected the same
- way you would protect a watching-only wallet, though it
- usually contains much less information than that.
+ reply = QMessageBox.warning(self, tr('Bug Reporting'), tr("""
+ As of version 0.91, Armory now includes a form for reporting
+ problems with the software. Please use
+ "Help"\xe2\x86\x92"Submit Bug Report"
+ to send a report directly to the Armory team, which will include
+ your log file automatically."""), QMessageBox.Ok | QMessageBox.Cancel)
+
+ if not reply==QMessageBox.Ok:
+ return
+
+ if self.logFilePrivacyWarning(wCancel=True):
+ self.saveCombinedLogFile()
+
+ #############################################################################
+ def getUserAgreeToPrivacy(self, getAgreement=False):
+ ptype = 'submitbug' if getAgreement else 'generic'
+ dlg = DlgPrivacyPolicy(self, self, ptype)
+ if not dlg.exec_():
+ return False
+
+ return dlg.chkUserAgrees.isChecked()
+
+ #############################################################################
+ def logFileTriplePrivacyWarning(self):
+ return MsgBoxCustom(MSGBOX.Warning, tr('Privacy Warning'), tr("""
+ ATI Privacy Policy
- No private key data is ever written to the log file.
- Some information about your wallets or balances may appear
- in the log file, but only enough to help the Armory developers
- track down bugs in the software.
+ You should review the Armory Technologies, Inc. privacy
+ policy before sending any data to ATI servers.
- Please do not send the log file to the Armory developers if you are not
- comfortable with them seeing some of your addresses and transactions.
- """) + extraStr, wCancel=True, yesStr='Export', noStr='Cancel')
-
- if reply:
-
- def getLastXBytesOfFile(filename, nBytes=500*1024):
- if not os.path.exists(filename):
- LOGERROR('File does not exist!')
- return ''
-
- sz = os.path.getsize(filename)
- with open(filename, 'rb') as fin:
- if sz > nBytes:
- fin.seek(sz - nBytes)
- return fin.read()
-
- # TODO: Interleave the C++ log and the python log. That could be a lot of work!
- defaultFn = 'armorylog_%s.txt' % unixTimeToFormatStr(RightNow(), '%Y%m%d_%H%M')
- logfn = self.getFileSave(title='Export Log File', \
+ Wallet Analysis Log Files
+
+ The wallet analysis logs contain no personally-identifiable
+ information, only a record of errors and inconsistencies
+ found in your wallet file. No private keys or even public
+ keys are included.
+
+
+ Regular Log Files
+
+ The regular log files do not contain any security-sensitive
+ information, but some users may consider the information to be
+ privacy-sensitive. The log files may identify some addresses
+ and transactions that are related to your wallets. It is always
+ recommended you include your log files with any request to the
+ Armory team, unless you are uncomfortable with the privacy
+ implications.
+
+
+ Watching-only Wallet
+
+ A watching-only wallet is a copy of a regular wallet that does not
+ contain any signing keys. This allows the holder to see the balance
+ and transaction history of the wallet, but not spend any of the funds.
+
+ You may be requested to submit a watching-only copy of your wallet
+ to Armory Technologies, Inc. to make sure that there is no
+ risk to the security of your funds. You should not even consider
+ sending your
+ watching-only wallet unless it was specifically requested by an
+ Armory representative.""") % PRIVACY_URL, yesStr="&Ok")
+
+
+ #############################################################################
+ def logFilePrivacyWarning(self, wCancel=False):
+ return MsgBoxCustom(MSGBOX.Warning, tr('Privacy Warning'), tr("""
+ ATI Privacy Policy
+
+ You should review the Armory Technologies, Inc. privacy
+ policy before sending any data to ATI servers.
+
+
+ Armory log files do not contain any security-sensitive
+ information, but some users may consider the information to be
+ privacy-sensitive. The log files may identify some addresses
+ and transactions that are related to your wallets.
+
+
+ No signing-key data is ever written to the log file.
+ Only enough data is there to help the Armory developers
+ track down bugs in the software, but it may still be considered
+ sensitive information to some users.
+
+
+ Please do not send the log file to the Armory developers if you
+ are not comfortable with the privacy implications! However, if you
+ do not send the log file, it may be very difficult or impossible
+ for us to help you with your problem.
+
+
Advanced tip: You can use
+ "File"\xe2\x86\x92"Export Log File" from the main
+ window to save a copy of the log file that you can manually
+ review."""), wCancel=wCancel, yesStr="&Ok")
+
+
+ #############################################################################
+ def saveCombinedLogFile(self, saveFile=None):
+ if saveFile is None:
+ # TODO: Interleave the C++ log and the python log.
+ # That could be a lot of work!
+ defaultFN = 'armorylog_%s.txt' % \
+ unixTimeToFormatStr(RightNow(),'%Y%m%d_%H%M')
+ saveFile = self.getFileSave(title='Export Log File', \
ffilter=['Text Files (*.txt)'], \
- defaultFilename=defaultFn)
+ defaultFilename=defaultFN)
- if len(unicode(logfn)) > 0:
- pyFilename = ARMORY_LOG_FILE
- cppFilename = os.path.join(ARMORY_HOME_DIR, 'armorycpplog.txt')
- fout = open(logfn, 'wb')
- fout.write(getLastXBytesOfFile(pyFilename, 256*1024))
- fout.write(getLastXBytesOfFile(cppFilename, 256*1024))
- fout.close()
+ def getLastBytesOfFile(filename, nBytes=500*1024):
+ if not os.path.exists(filename):
+ LOGERROR('File does not exist!')
+ return ''
+
+ sz = os.path.getsize(filename)
+ with open(filename, 'rb') as fin:
+ if sz > nBytes:
+ fin.seek(sz - nBytes)
+ return fin.read()
+
+
+ if len(unicode(saveFile)) > 0:
+ fout = open(saveFile, 'wb')
+ fout.write(getLastBytesOfFile(ARMORY_LOG_FILE, 256*1024))
+ fout.write(getLastBytesOfFile(ARMCPP_LOG_FILE, 256*1024))
+ fout.close()
+
+ LOGINFO('Log saved to %s', saveFile)
+
- LOGINFO('Log saved to %s', logfn)
#############################################################################
def blinkTaskbar(self):
self.activateWindow()
-
+
#############################################################################
def lookForBitcoind(self):
LOGDEBUG('lookForBitcoind')
if satoshiIsAvailable():
return 'Running'
-
+
self.setSatoshiPaths()
try:
@@ -3315,35 +3939,36 @@ def lookForBitcoind(self):
return 'AllGood'
#############################################################################
- def pressModeSwitchButton(self):
- LOGDEBUG('pressModeSwitchButton')
+ def executeModeSwitch(self):
+ LOGDEBUG('executeModeSwitch')
+
if TheSDM.getSDMState() == 'BitcoindExeMissing':
bitcoindStat = self.lookForBitcoind()
if bitcoindStat=='Running':
- result = QMessageBox.warning(self, 'Already running!', \
- 'The Bitcoin software appears to be installed now, but it '
- 'needs to be closed for Armory to work. Would you like Armory '
- 'to close it for you?', QMessageBox.Yes | QMessageBox.No)
+ result = QMessageBox.warning(self, tr('Already running!'), tr("""
+ The Bitcoin software appears to be installed now, but it
+ needs to be closed for Armory to work. Would you like Armory
+ to close it for you?"""), QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
- self.startBitcoindIfNecessary()
+ self.startBitcoindIfNecessary()
elif bitcoindStat=='StillMissing':
- QMessageBox.warning(self, 'Still Missing', \
- 'The Bitcoin software still appears to be missing. If you '
- 'just installed it, then please adjust your settings to point '
- 'to the installation directory.', QMessageBox.Ok)
- self.startBitcoindIfNecessary()
- elif self.doManageSatoshi and not TheSDM.isRunningBitcoind():
+ QMessageBox.warning(self, tr('Still Missing'), tr("""
+ The Bitcoin software still appears to be missing. If you
+ just installed it, then please adjust your settings to point
+ to the installation directory."""), QMessageBox.Ok)
+ self.startBitcoindIfNecessary()
+ elif self.doAutoBitcoind and not TheSDM.isRunningBitcoind():
if satoshiIsAvailable():
- result = QMessageBox.warning(self, 'Still Running', \
- 'Bitcoin-Qt is still running. Armory cannot start until '
- 'it is closed. Do you want Armory to close it for you?', \
+ result = QMessageBox.warning(self, tr('Still Running'), tr("""
+ 'Bitcoin-Qt is still running. Armory cannot start until
+ 'it is closed. Do you want Armory to close it for you?"""), \
QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
- self.startBitcoindIfNecessary()
+ self.startBitcoindIfNecessary()
else:
- self.startBitcoindIfNecessary()
+ self.startBitcoindIfNecessary()
elif TheBDM.getBDMState() == 'BlockchainReady' and TheBDM.isDirty():
#self.resetBdmBeforeScan()
self.startRescanBlockchain()
@@ -3357,30 +3982,29 @@ def pressModeSwitchButton(self):
self.setDashboardDetails()
-
-
+
+
#############################################################################
+ @TimeThisFunction
def resetBdmBeforeScan(self):
- if TheBDM.getBDMState()=='Scanning':
+ if TheBDM.getBDMState()=='Scanning':
LOGINFO('Aborting load')
touchFile(os.path.join(ARMORY_HOME_DIR,'abortload.txt'))
os.remove(os.path.join(ARMORY_HOME_DIR,'blkfiles.txt'))
- TimerStart("resetBdmBeforeScan")
TheBDM.Reset(wait=False)
for wid,wlt in self.walletMap.iteritems():
TheBDM.registerWallet(wlt.cppWallet)
- TimerStop("resetBdmBeforeScan")
#############################################################################
- def SetupDashboard(self):
- LOGDEBUG('SetupDashboard')
+ def setupDashboard(self):
+ LOGDEBUG('setupDashboard')
self.lblBusy = QLabel('')
if OS_WINDOWS:
# Unfortunately, QMovie objects don't work in Windows with py2exe
- # had to create my own little "Busy" icon and hook it up to the
+ # had to create my own little "Busy" icon and hook it up to the
# heartbeat
self.lblBusy.setPixmap(QPixmap(':/loadicon_0.png'))
self.numHeartBeat = 0
@@ -3398,34 +4022,65 @@ def loadBarUpdate():
self.btnModeSwitch = QPushButton('')
self.connect(self.btnModeSwitch, SIGNAL('clicked()'), \
- self.pressModeSwitchButton)
+ self.executeModeSwitch)
- # Will switch this to array/matrix of widgets if I get more than 2 rows
- self.lblDashModeSync = QRichLabel('',doWrap=False)
- self.lblDashModeScan = QRichLabel('',doWrap=False)
- self.lblDashModeSync.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
- self.lblDashModeScan.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
- self.barProgressSync = QProgressBar(self)
- self.barProgressScan = QProgressBar(self)
+ # Will switch this to array/matrix of widgets if I get more than 2 rows
+ self.lblDashModeTorrent = QRichLabel('',doWrap=False)
+ self.lblDashModeSync = QRichLabel('',doWrap=False)
+ self.lblDashModeBuild = QRichLabel('',doWrap=False)
+ self.lblDashModeScan = QRichLabel('',doWrap=False)
+
+ self.lblDashModeTorrent.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
+ self.lblDashModeSync.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
+ self.lblDashModeBuild.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
+ self.lblDashModeScan.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
+
+ self.barProgressTorrent = QProgressBar(self)
+ self.barProgressSync = QProgressBar(self)
+ self.barProgressBuild = QProgressBar(self)
+ self.barProgressScan = QProgressBar(self)
+
+ self.barProgressTorrent.setRange(0,100)
self.barProgressSync.setRange(0,100)
+ self.barProgressBuild.setRange(0,100)
self.barProgressScan.setRange(0,100)
+
+ self.lblTorrentStats = QRichLabel('', hAlign=Qt.AlignHCenter)
+
twid = relaxedSizeStr(self,'99 seconds')[0]
- self.lblTimeLeftSync = QRichLabel('')
- self.lblTimeLeftScan = QRichLabel('')
+ self.lblTimeLeftTorrent = QRichLabel('')
+ self.lblTimeLeftSync = QRichLabel('')
+ self.lblTimeLeftBuild = QRichLabel('')
+ self.lblTimeLeftScan = QRichLabel('')
+
self.lblTimeLeftSync.setMinimumWidth(twid)
self.lblTimeLeftScan.setMinimumWidth(twid)
+ self.lblStatsTorrent = QRichLabel('')
+
layoutDashMode = QGridLayout()
- layoutDashMode.addWidget(self.lblDashModeSync, 0,0)
- layoutDashMode.addWidget(self.barProgressSync, 0,1)
- layoutDashMode.addWidget(self.lblTimeLeftSync, 0,2)
- layoutDashMode.addWidget(self.lblDashModeScan, 1,0)
- layoutDashMode.addWidget(self.barProgressScan, 1,1)
- layoutDashMode.addWidget(self.lblTimeLeftScan, 1,2)
- layoutDashMode.addWidget(self.lblBusy, 0,3, 2,1)
- layoutDashMode.addWidget(self.btnModeSwitch, 0,3, 2,1)
+ layoutDashMode.addWidget(self.lblDashModeTorrent, 0,0)
+ layoutDashMode.addWidget(self.barProgressTorrent, 0,1)
+ layoutDashMode.addWidget(self.lblTimeLeftTorrent, 0,2)
+ layoutDashMode.addWidget(self.lblTorrentStats, 1,0)
+
+ layoutDashMode.addWidget(self.lblDashModeSync, 2,0)
+ layoutDashMode.addWidget(self.barProgressSync, 2,1)
+ layoutDashMode.addWidget(self.lblTimeLeftSync, 2,2)
+
+ layoutDashMode.addWidget(self.lblDashModeBuild, 3,0)
+ layoutDashMode.addWidget(self.barProgressBuild, 3,1)
+ layoutDashMode.addWidget(self.lblTimeLeftBuild, 3,2)
+
+ layoutDashMode.addWidget(self.lblDashModeScan, 4,0)
+ layoutDashMode.addWidget(self.barProgressScan, 4,1)
+ layoutDashMode.addWidget(self.lblTimeLeftScan, 4,2)
+
+ layoutDashMode.addWidget(self.lblBusy, 0,3, 5,1)
+ layoutDashMode.addWidget(self.btnModeSwitch, 0,3, 5,1)
+
self.frmDashModeSub = QFrame()
self.frmDashModeSub.setFrameStyle(STYLE_SUNKEN)
self.frmDashModeSub.setLayout(layoutDashMode)
@@ -3433,7 +4088,7 @@ def loadBarUpdate():
self.frmDashModeSub, \
'Stretch'])
-
+
self.lblDashDescr1 = QRichLabel('')
self.lblDashDescr2 = QRichLabel('')
for lbl in [self.lblDashDescr1, self.lblDashDescr2]:
@@ -3444,21 +4099,21 @@ def loadBarUpdate():
lbl.setPalette(qpal)
lbl.setOpenExternalLinks(True)
- # Set up an array of buttons in the middle of the dashboard, to be used
+ # Set up an array of buttons in the middle of the dashboard, to be used
# to help the user install bitcoind.
self.lblDashBtnDescr = QRichLabel('')
self.lblDashBtnDescr.setOpenExternalLinks(True)
BTN,LBL,TTIP = range(3)
self.dashBtns = [[None]*3 for i in range(5)]
self.dashBtns[DASHBTNS.Close ][BTN] = QPushButton('Close Bitcoin Process')
- self.dashBtns[DASHBTNS.Install ][BTN] = QPushButton('Auto-Install Bitcoin')
+ self.dashBtns[DASHBTNS.Install ][BTN] = QPushButton('Download Bitcoin')
self.dashBtns[DASHBTNS.Browse ][BTN] = QPushButton('Open www.bitcoin.org')
self.dashBtns[DASHBTNS.Instruct][BTN] = QPushButton('Installation Instructions')
self.dashBtns[DASHBTNS.Settings][BTN] = QPushButton('Change Settings')
#####
- def openBitcoinOrg():
+ def openBitcoinOrg():
webbrowser.open('http://www.bitcoin.org/en/download')
@@ -3474,18 +4129,18 @@ def openInstruct():
-
+
self.connect(self.dashBtns[DASHBTNS.Close][BTN], SIGNAL('clicked()'), \
- self.closeExistingBitcoin)
+ self.closeExistingBitcoin)
self.connect(self.dashBtns[DASHBTNS.Install][BTN], SIGNAL('clicked()'), \
- self.installSatoshiClient)
+ self.openDLSatoshi)
self.connect(self.dashBtns[DASHBTNS.Browse][BTN], SIGNAL('clicked()'), \
openBitcoinOrg)
self.connect(self.dashBtns[DASHBTNS.Settings][BTN], SIGNAL('clicked()'), \
self.openSettings)
#self.connect(self.dashBtns[DASHBTNS.Instruct][BTN], SIGNAL('clicked()'), \
- #self.openInstructWindow)
+ #self.openInstructWindow)
self.dashBtns[DASHBTNS.Close][LBL] = QRichLabel( \
'Stop existing Bitcoin processes so that Armory can open its own')
@@ -3539,15 +4194,15 @@ def openInstruct():
dist = platform.linux_distribution()
if dist[0] in ['Ubuntu','LinuxMint'] or 'debian' in dist:
self.dashBtns[DASHBTNS.Install][BTN].setEnabled(True)
- self.dashBtns[DASHBTNS.Install][LBL] = QRichLabel( \
- 'Automatic installation for Ubuntu/Debian')
- self.dashBtns[DASHBTNS.Install][TTIP] = self.createToolTipWidget( \
- 'Will download and install Bitcoin from trusted sources.')
+ self.dashBtns[DASHBTNS.Install][LBL] = QRichLabel( tr("""
+ Download and Install Bitcoin Core for Ubuntu/Debian"""))
+ self.dashBtns[DASHBTNS.Install][TTIP] = self.createToolTipWidget( tr("""
+ 'Will download and Bitcoin software and cryptographically verify it"""))
elif OS_MACOSX:
pass
else:
print 'Unrecognized OS!'
-
+
self.frmDashMgmtButtons = QFrame()
self.frmDashMgmtButtons.setFrameStyle(STYLE_SUNKEN)
@@ -3559,10 +4214,10 @@ def openInstruct():
wMin = tightSizeNChar(self, 50)[0]
self.dashBtns[r][c].setMinimumWidth(wMin)
layoutButtons.addWidget(self.dashBtns[r][c], r+1,c)
-
+
self.frmDashMgmtButtons.setLayout(layoutButtons)
self.frmDashMidButtons = makeHorizFrame(['Stretch', \
- self.frmDashMgmtButtons,
+ self.frmDashMgmtButtons,
'Stretch'])
dashLayout = QVBoxLayout()
@@ -3580,6 +4235,366 @@ def openInstruct():
scrollLayout.addWidget(self.dashScrollArea)
self.tabDashboard.setLayout(scrollLayout)
+
+
+ #############################################################################
+ def setupAnnounceTab(self):
+
+ self.lblAlertStr = QRichLabel(tr("""
+ Announcements and alerts from Armory Technologies,
+ Inc."""), doWrap=False, hAlign=Qt.AlignHCenter)
+
+ def checkUpd():
+ lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
+ self.explicitCheckAnnouncements(5)
+ lastUpdate2 = self.announceFetcher.getLastSuccessfulFetchTime()
+ if lastUpdate==lastUpdate2:
+ QMessageBox.warning(self, tr('Not Available'), tr("""
+ Could not access the Armory
+ Technologies, Inc. announcement feeder.
+ Try again in a couple minutes.""") % \
+ htmlColor('TextGreen'), QMessageBox.Ok)
+ else:
+ QMessageBox.warning(self, tr('Update'), tr("""
+ Announcements are now up to date!"""), QMessageBox.Ok)
+
+
+ self.lblLastUpdated = QRichLabel('', doWrap=False)
+ self.btnCheckForUpdates = QPushButton(tr('Check for Updates'))
+ self.connect(self.btnCheckForUpdates, SIGNAL(CLICKED), checkUpd)
+
+
+ frmLastUpdate = makeHorizFrame(['Stretch', \
+ self.lblLastUpdated, \
+ self.btnCheckForUpdates, \
+ 'Stretch'])
+
+ self.icoArmorySWVersion = QLabel('')
+ self.lblArmorySWVersion = QRichLabel(tr("""
+ No version information is available"""), doWrap=False)
+ self.icoSatoshiSWVersion = QLabel('')
+ self.lblSatoshiSWVersion = QRichLabel('', doWrap=False)
+
+ self.btnSecureDLArmory = QPushButton(tr('Secure Downloader'))
+ self.btnSecureDLSatoshi = QPushButton(tr('Secure Downloader'))
+ self.btnSecureDLArmory.setVisible(False)
+ self.btnSecureDLSatoshi.setVisible(False)
+ self.connect(self.btnSecureDLArmory, SIGNAL(CLICKED), self.openDLArmory)
+ self.connect(self.btnSecureDLSatoshi, SIGNAL(CLICKED), self.openDLSatoshi)
+
+
+ frmVersions = QFrame()
+ layoutVersions = QGridLayout()
+ layoutVersions.addWidget(self.icoArmorySWVersion, 0,0)
+ layoutVersions.addWidget(self.lblArmorySWVersion, 0,1)
+ layoutVersions.addWidget(self.btnSecureDLArmory, 0,2)
+ layoutVersions.addWidget(self.icoSatoshiSWVersion, 1,0)
+ layoutVersions.addWidget(self.lblSatoshiSWVersion, 1,1)
+ layoutVersions.addWidget(self.btnSecureDLSatoshi, 1,2)
+ layoutVersions.setColumnStretch(0,0)
+ layoutVersions.setColumnStretch(1,1)
+ layoutVersions.setColumnStretch(2,0)
+ frmVersions.setLayout(layoutVersions)
+ frmVersions.setFrameStyle(STYLE_RAISED)
+
+ lblVerHeader = QRichLabel(tr("""
+ Software Version Updates:"""), doWrap=False, \
+ hAlign=Qt.AlignHCenter)
+ lblTableHeader = QRichLabel(tr("""
+ All Available Notifications:"""), doWrap=False, \
+ hAlign=Qt.AlignHCenter)
+
+
+ # We need to generate popups when a widget is clicked, and be able
+ # change that particular widget's target, when the table is updated.
+ # Create one of these DlgGen objects for each of the 10 rows, simply
+ # update it's nid and notifyMap when the table is updated
+ class DlgGen():
+ def setParams(self, parent, nid, notifyMap):
+ self.parent = parent
+ self.nid = nid
+ self.notifyMap = notifyMap
+
+ def __call__(self):
+ return DlgNotificationWithDNAA(self.parent, self.parent, \
+ self.nid, self.notifyMap, False).exec_()
+
+ self.announceTableWidgets = \
+ [[QLabel(''), QRichLabel(''), QLabelButton('+'), DlgGen()] \
+ for i in range(10)]
+
+
+
+ layoutTable = QGridLayout()
+ for i in range(10):
+ for j in range(3):
+ layoutTable.addWidget(self.announceTableWidgets[i][j], i,j)
+ self.connect(self.announceTableWidgets[i][2], SIGNAL(CLICKED), \
+ self.announceTableWidgets[i][3])
+
+ layoutTable.setColumnStretch(0,0)
+ layoutTable.setColumnStretch(1,1)
+ layoutTable.setColumnStretch(2,0)
+
+ frmTable = QFrame()
+ frmTable.setLayout(layoutTable)
+ frmTable.setFrameStyle(STYLE_SUNKEN)
+
+ self.updateAnnounceTable()
+
+
+ frmEverything = makeVertFrame( [ self.lblAlertStr,
+ frmLastUpdate,
+ 'Space(30)',
+ lblTableHeader,
+ frmTable,
+ 'Space(30)',
+ lblVerHeader,
+ frmVersions,
+ 'Stretch'])
+
+ frmEverything.setMinimumWidth(300)
+ frmEverything.setMaximumWidth(800)
+
+ frmFinal = makeHorizFrame(['Stretch', frmEverything, 'Stretch'])
+
+ self.announceScrollArea = QScrollArea()
+ self.announceScrollArea.setWidgetResizable(True)
+ self.announceScrollArea.setWidget(frmFinal)
+ scrollLayout = QVBoxLayout()
+ scrollLayout.addWidget(self.announceScrollArea)
+ self.tabAnnounce.setLayout(scrollLayout)
+
+ self.announceIsSetup = True
+
+
+ #############################################################################
+ def openDownloaderAll(self):
+ dl,cl = self.getDownloaderData()
+ if not dl is None and not cl is None:
+ UpgradeDownloaderDialog(self, self, None, dl, cl).exec_()
+
+ #############################################################################
+ def openDLArmory(self):
+ dl,cl = self.getDownloaderData()
+ if not dl is None and not cl is None:
+ UpgradeDownloaderDialog(self, self, 'Armory', dl, cl).exec_()
+
+ #############################################################################
+ def openDLSatoshi(self):
+ dl,cl = self.getDownloaderData()
+ if not dl is None and not cl is None:
+ UpgradeDownloaderDialog(self, self, 'Satoshi', dl, cl).exec_()
+
+
+ #############################################################################
+ def getDownloaderData(self):
+ dl = self.announceFetcher.getAnnounceFile('downloads')
+ cl = self.announceFetcher.getAnnounceFile('changelog')
+
+ dlObj = downloadLinkParser().parseDownloadList(dl)
+ clObj = changelogParser().parseChangelogText(cl)
+
+ if dlObj is None or clObj is None:
+ QMessageBox.warning(self, tr('No Data'), tr("""
+ The secure downloader has not received any download
+ data to display. Either the Armory
+ Technologies, Inc. announcement feeder is
+ down, or this computer cannot access the server.""") % \
+ htmlColor('TextGreen'), QMessageBox.Ok)
+ return None,None
+
+ lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
+ sinceLastUpd = RightNow() - lastUpdate
+ if lastUpdate < RightNow()-1*WEEK:
+ QMessageBox.warning(self, tr('Old Data'), tr("""
+ The last update retrieved from the Armory
+ Technologies, Inc. announcement feeder was %s
+ ago. The following downloads may not be the latest
+ available.""") % (htmlColor("TextGreen"), \
+ secondsToHumanTime(sinceLastUpd)), QMessageBox.Ok)
+
+ dl = self.announceFetcher.getAnnounceFile('downloads')
+ cl = self.announceFetcher.getAnnounceFile('changelog')
+
+ return dl,cl
+
+
+
+ #############################################################################
+ def updateAnnounceTab(self, *args):
+
+ if not self.announceIsSetup:
+ return
+
+ iconArmory = ':/armory_icon_32x32.png'
+ iconSatoshi = ':/bitcoinlogo.png'
+ iconInfoFile = ':/MsgBox_info48.png'
+ iconGoodFile = ':/MsgBox_good48.png'
+ iconWarnFile = ':/MsgBox_warning48.png'
+ iconCritFile = ':/MsgBox_critical24.png'
+
+ lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
+ noAnnounce = (lastUpdate == 0)
+
+ if noAnnounce:
+ self.lblLastUpdated.setText(tr("No announcement data was found!"))
+ self.btnSecureDLArmory.setVisible(False)
+ self.icoArmorySWVersion.setVisible(True)
+ self.lblArmorySWVersion.setText(tr(""" You are running Armory
+ version %s""") % getVersionString(BTCARMORY_VERSION))
+ else:
+ updTimeStr = unixTimeToFormatStr(lastUpdate)
+ self.lblLastUpdated.setText(tr("Last Updated: %s") % updTimeStr)
+
+
+ verStrToInt = lambda s: getVersionInt(readVersionString(s))
+
+ # Notify of Armory updates
+ self.icoArmorySWVersion.setPixmap(QPixmap(iconArmory).scaled(24,24))
+ self.icoSatoshiSWVersion.setPixmap(QPixmap(iconSatoshi).scaled(24,24))
+
+ try:
+ armCurrent = verStrToInt(self.armoryVersions[0])
+ armLatest = verStrToInt(self.armoryVersions[1])
+ if armCurrent >= armLatest:
+ dispIcon = QPixmap(iconArmory).scaled(24,24)
+ self.icoArmorySWVersion.setPixmap(dispIcon)
+ self.btnSecureDLArmory.setVisible(False)
+ self.lblArmorySWVersion.setText(tr("""
+ You are using the latest version of Armory"""))
+ else:
+ dispIcon = QPixmap(iconWarnFile).scaled(24,24)
+ self.icoArmorySWVersion.setPixmap(dispIcon)
+ self.btnSecureDLArmory.setVisible(True)
+ self.lblArmorySWVersion.setText(tr("""
+ There is a newer version of Armory available!"""))
+ self.btnSecureDLArmory.setVisible(True)
+ self.icoArmorySWVersion.setVisible(True)
+ except:
+ self.btnSecureDLArmory.setVisible(False)
+ self.lblArmorySWVersion.setText(tr(""" You are running Armory
+ version %s""") % getVersionString(BTCARMORY_VERSION))
+
+
+ try:
+ satCurrStr,satLastStr = self.satoshiVersions
+ satCurrent = verStrToInt(satCurrStr) if satCurrStr else 0
+ satLatest = verStrToInt(satLastStr) if satLastStr else 0
+
+ # Show CoreBTC updates
+ if satCurrent and satLatest:
+ if satCurrent >= satLatest:
+ dispIcon = QPixmap(iconGoodFile).scaled(24,24)
+ self.btnSecureDLSatoshi.setVisible(False)
+ self.icoSatoshiSWVersion.setPixmap(dispIcon)
+ self.lblSatoshiSWVersion.setText(tr(""" You are using
+ the latest version of core Bitcoin (%s)""") % satCurrStr)
+ else:
+ dispIcon = QPixmap(iconWarnFile).scaled(24,24)
+ self.btnSecureDLSatoshi.setVisible(True)
+ self.icoSatoshiSWVersion.setPixmap(dispIcon)
+ self.lblSatoshiSWVersion.setText(tr("""
+ There is a newer version of the core Bitcoin software
+ available!"""))
+ elif satCurrent:
+ # satLatest is not available
+ dispIcon = QPixmap(iconGoodFile).scaled(24,24)
+ self.btnSecureDLSatoshi.setVisible(False)
+ self.icoSatoshiSWVersion.setPixmap(None)
+ self.lblSatoshiSWVersion.setText(tr(""" You are using
+ core Bitcoin version %s""") % satCurrStr)
+ elif satLatest:
+ # only satLatest is avail (maybe offline)
+ dispIcon = QPixmap(iconSatoshi).scaled(24,24)
+ self.btnSecureDLSatoshi.setVisible(True)
+ self.icoSatoshiSWVersion.setPixmap(dispIcon)
+ self.lblSatoshiSWVersion.setText(tr("""Core Bitcoin version
+ %s is available.""") % satLastStr)
+ else:
+ # only satLatest is avail (maybe offline)
+ dispIcon = QPixmap(iconSatoshi).scaled(24,24)
+ self.btnSecureDLSatoshi.setVisible(False)
+ self.icoSatoshiSWVersion.setPixmap(dispIcon)
+ self.lblSatoshiSWVersion.setText(tr("""No version information
+ is available for core Bitcoin""") )
+
+
+
+
+ #self.btnSecureDLSatoshi.setVisible(False)
+ #if self.satoshiVersions[0]:
+ #self.lblSatoshiSWVersion.setText(tr(""" You are running
+ #core Bitcoin software version %s""") % self.satoshiVersions[0])
+ #else:
+ #self.lblSatoshiSWVersion.setText(tr("""No information is
+ #available for the core Bitcoin software"""))
+ except:
+ LOGEXCEPT('Failed to process satoshi versions')
+
+
+ self.updateAnnounceTable()
+
+
+ #############################################################################
+ def updateAnnounceTable(self):
+
+ # Default: Make everything non-visible except first row, middle column
+ for i in range(10):
+ for j in range(3):
+ self.announceTableWidgets[i][j].setVisible(i==0 and j==1)
+
+ if len(self.almostFullNotificationList)==0:
+ self.announceTableWidgets[0][1].setText(tr("""
+ There are no announcements or alerts to display"""))
+ return
+
+
+ alertsForSorting = []
+ for nid,nmap in self.almostFullNotificationList.iteritems():
+ alertsForSorting.append([nid, int(nmap['PRIORITY'])])
+
+ sortedAlerts = sorted(alertsForSorting, key=lambda a: -a[1])[:10]
+
+ i = 0
+ for nid,priority in sortedAlerts:
+ if priority>=4096:
+ pixm = QPixmap(':/MsgBox_critical64.png')
+ elif priority>=3072:
+ pixm = QPixmap(':/MsgBox_warning48.png')
+ elif priority>=2048:
+ pixm = QPixmap(':/MsgBox_info48.png')
+ else:
+ pixm = QPixmap(':/MsgBox_info48.png')
+
+
+ shortDescr = self.almostFullNotificationList[nid]['SHORTDESCR']
+ if priority>=4096:
+ shortDescr = '' + shortDescr + ''
+ shortDescr = shortDescr % htmlColor('TextWarn')
+
+ self.announceTableWidgets[i][0].setPixmap(pixm.scaled(24,24))
+ self.announceTableWidgets[i][1].setText(shortDescr)
+ self.announceTableWidgets[i][2].setVisible(True)
+ self.announceTableWidgets[i][3].setParams(self, nid, \
+ self.almostFullNotificationList[nid])
+
+ for j in range(3):
+ self.announceTableWidgets[i][j].setVisible(True)
+
+ i += 1
+
+
+
+
+
+ #############################################################################
+ def explicitCheckAnnouncements(self, waitTime=3):
+ self.announceFetcher.fetchRightNow(waitTime)
+ self.processAnnounceData()
+ self.updateAnnounceTab()
+
+
#############################################################################
def installSatoshiClient(self, closeWhenDone=False):
@@ -3610,7 +4625,7 @@ def installSatoshiClient(self, closeWhenDone=False):
'download the installer yourself.')
webbrowser.open('http://www.bitcoin.org/en/download')
return
-
+
print self.downloadDict['SATOSHI']['Windows']
theLink = self.downloadDict['SATOSHI']['Windows'][0]
theHash = self.downloadDict['SATOSHI']['Windows'][1]
@@ -3626,24 +4641,24 @@ def installSatoshiClient(self, closeWhenDone=False):
'to download and install Bitcoin-Qt manually.', QMessageBox.Ok)
webbrowser.open('http://www.bitcoin.org/en/download')
return
-
+
installerPath = os.path.join(ARMORY_HOME_DIR, os.path.basename(theLink))
LOGINFO('Installer path: %s', installerPath)
instFile = open(installerPath, 'wb')
instFile.write(fileData)
instFile.close()
-
+
def startInstaller():
execAndWait('"'+installerPath+'"', useStartInfo=False)
self.startBitcoindIfNecessary()
-
+
DlgExecLongProcess(startInstaller, tr("""
- Please Complete Bitcoin Installation (installer should
+ Please Complete Bitcoin Installation (installer should
have opened in your taskbar)"""), self, self).exec_()
elif OS_MACOSX:
LOGERROR('Cannot install on OSX')
-
+
if closeWhenDone:
self.closeForReal(None)
@@ -3661,55 +4676,179 @@ def closeExistingBitcoin(self):
'Attempted to kill the running Bitcoin-Qt/bitcoind instance, '
'but it was not found. ', QMessageBox.Ok)
- #############################################################################
- def getPercentageFinished(self, maxblk, lastblk):
- curr = EstimateCumulativeBlockchainSize(lastblk)
- maxb = EstimateCumulativeBlockchainSize(maxblk)
- return float(curr)/float(maxb)
+ #############################################################################
+ def getPercentageFinished(self, maxblk, lastblk):
+ curr = EstimateCumulativeBlockchainSize(lastblk)
+ maxb = EstimateCumulativeBlockchainSize(maxblk)
+ return float(curr)/float(maxb)
+
+ #############################################################################
+ def updateSyncProgress(self):
+
+ if TheTDM.getTDMState()=='Downloading':
+
+ dlSpeed = TheTDM.getLastStats('downRate')
+ timeEst = TheTDM.getLastStats('timeEst')
+ fracDone = TheTDM.getLastStats('fracDone')
+ numSeeds = TheTDM.getLastStats('numSeeds')
+ numPeers = TheTDM.getLastStats('numPeers')
+
+ self.barProgressTorrent.setVisible(True)
+ self.lblDashModeTorrent.setVisible(True)
+ self.lblTimeLeftTorrent.setVisible(True)
+ self.lblTorrentStats.setVisible(True)
+ self.barProgressTorrent.setFormat('%p%')
+
+ self.lblDashModeSync.setVisible(True)
+ self.barProgressSync.setVisible(True)
+ self.barProgressSync.setValue(0)
+ self.lblTimeLeftSync.setVisible(True)
+ self.barProgressSync.setFormat('')
+
+ self.lblDashModeBuild.setVisible(True)
+ self.barProgressBuild.setVisible(True)
+ self.barProgressBuild.setValue(0)
+ self.lblTimeLeftBuild.setVisible(True)
+ self.barProgressBuild.setFormat('')
+
+ self.lblDashModeScan.setVisible(True)
+ self.barProgressScan.setVisible(True)
+ self.barProgressScan.setValue(0)
+ self.lblTimeLeftScan.setVisible(True)
+ self.barProgressScan.setFormat('')
+
+ if not numSeeds:
+ self.barProgressTorrent.setValue(0)
+ self.lblTimeLeftTorrent.setText('')
+ self.lblTorrentStats.setText('')
+
+ self.lblDashModeTorrent.setText(tr('Initializing Torrent Engine'), \
+ size=4, bold=True, color='Foreground')
+
+ self.lblTorrentStats.setVisible(False)
+ else:
+ self.lblDashModeTorrent.setText(tr('Downloading via Armory CDN'), \
+ size=4, bold=True, color='Foreground')
+
+ if fracDone:
+ self.barProgressTorrent.setValue(int(99.9*fracDone))
+
+ if timeEst:
+ self.lblTimeLeftTorrent.setText(secondsToHumanTime(timeEst))
+
+ self.lblTorrentStats.setText(tr("""
+ Bootstrap Torrent: %s/sec from %d peers""") % \
+ (bytesToHumanSize(dlSpeed), numSeeds+numPeers))
+
+ self.lblTorrentStats.setVisible(True)
+
+
- #############################################################################
- def updateSyncProgress(self):
+ elif TheBDM.getBDMState()=='Scanning':
+ self.barProgressTorrent.setVisible(TheTDM.isStarted())
+ self.lblDashModeTorrent.setVisible(TheTDM.isStarted())
+ self.barProgressTorrent.setValue(100)
+ self.lblTimeLeftTorrent.setVisible(False)
+ self.lblTorrentStats.setVisible(False)
+ self.barProgressTorrent.setFormat('')
+
+ self.lblDashModeSync.setVisible(self.doAutoBitcoind)
+ self.barProgressSync.setVisible(self.doAutoBitcoind)
+ self.barProgressSync.setValue(100)
+ self.lblTimeLeftSync.setVisible(False)
+ self.barProgressSync.setFormat('')
+
+ self.lblDashModeBuild.setVisible(True)
+ self.barProgressBuild.setVisible(True)
+ self.lblTimeLeftBuild.setVisible(True)
+
+ self.lblDashModeScan.setVisible(True)
+ self.barProgressScan.setVisible(True)
+ self.lblTimeLeftScan.setVisible(True)
- if TheBDM.getBDMState()=='Scanning':
# Scan time is super-simple to predict: it's pretty much linear
# with the number of bytes remaining.
+
phase,pct,rate,tleft = TheBDM.predictLoadTime()
if phase==1:
- self.lblDashModeScan.setText( 'Building Databases', \
+ self.lblDashModeBuild.setText( 'Building Databases', \
size=4, bold=True, color='Foreground')
+ self.lblDashModeScan.setText( 'Scan Transaction History', \
+ size=4, bold=True, color='DisableFG')
+ self.barProgressBuild.setFormat('%p%')
+ self.barProgressScan.setFormat('')
+
elif phase==3:
+ self.lblDashModeBuild.setText( 'Build Databases', \
+ size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( 'Scanning Transaction History', \
size=4, bold=True, color='Foreground')
+ self.lblTimeLeftBuild.setVisible(False)
+ self.barProgressBuild.setFormat('')
+ self.barProgressBuild.setValue(100)
+ self.barProgressScan.setFormat('%p%')
elif phase==4:
self.lblDashModeScan.setText( 'Global Blockchain Index', \
size=4, bold=True, color='Foreground')
- self.barProgressSync.setValue(100)
tleft15 = (int(tleft-1)/15 + 1)*15
if tleft < 2:
- self.lblTimeLeftScan.setText('')
- self.barProgressScan.setValue(1)
+ tstring = ''
+ pvalue = 100
else:
- self.lblTimeLeftScan.setText(secondsToHumanTime(tleft15))
- self.barProgressScan.setValue(pct*100)
+ tstring = secondsToHumanTime(tleft15)
+ pvalue = pct*100
+
+ if phase==1:
+ self.lblTimeLeftBuild.setText(tstring)
+ self.barProgressBuild.setValue(pvalue)
+ elif phase==3:
+ self.lblTimeLeftScan.setText(tstring)
+ self.barProgressScan.setValue(pvalue)
elif TheSDM.getSDMState() in ['BitcoindInitializing','BitcoindSynchronizing']:
- ssdm = TheSDM.getSDMState()
+
+ self.barProgressTorrent.setVisible(TheTDM.isStarted())
+ self.lblDashModeTorrent.setVisible(TheTDM.isStarted())
+ self.barProgressTorrent.setValue(100)
+ self.lblTimeLeftTorrent.setVisible(False)
+ self.lblTorrentStats.setVisible(False)
+ self.barProgressTorrent.setFormat('')
+
+ self.lblDashModeSync.setVisible(True)
+ self.barProgressSync.setVisible(True)
+ self.lblTimeLeftSync.setVisible(True)
+ self.barProgressSync.setFormat('%p%')
+
+ self.lblDashModeBuild.setVisible(True)
+ self.barProgressBuild.setVisible(True)
+ self.lblTimeLeftBuild.setVisible(False)
+ self.barProgressBuild.setValue(0)
+ self.barProgressBuild.setFormat('')
+
+ self.lblDashModeScan.setVisible(True)
+ self.barProgressScan.setVisible(True)
+ self.lblTimeLeftScan.setVisible(False)
+ self.barProgressScan.setValue(0)
+ self.barProgressScan.setFormat('')
+
+ ssdm = TheSDM.getSDMState()
lastBlkNum = self.getSettingOrSetDefault('LastBlkRecv', 0)
lastBlkTime = self.getSettingOrSetDefault('LastBlkRecvTime', 0)
-
+
# Get data from SDM if it has it
info = TheSDM.getTopBlockInfo()
if len(info['tophash'])>0:
lastBlkNum = info['numblks']
lastBlkTime = info['toptime']
-
+
# Use a reference point if we are starting from scratch
- refBlock = max(231747, lastBlkNum)
- refTime = max(1366171579, lastBlkTime)
+ refBlock = max(290746, lastBlkNum)
+ refTime = max(1394922889, lastBlkTime)
+
-
- # Ten min/block is pretty accurate, even at genesis blk (about 1% slow)
+ # Ten min/block is pretty accurate, even from genesis (about 1% slow)
+ # And it gets better as we sync past the reference block above
self.approxMaxBlock = refBlock + int((RightNow() - refTime) / (10*MINUTE))
self.approxBlkLeft = self.approxMaxBlock - lastBlkNum
self.approxPctSoFar = self.getPercentageFinished(self.approxMaxBlock, \
@@ -3735,21 +4874,22 @@ def updateSyncProgress(self):
else:
timeRemain = None
-
+
intPct = int(100*self.approxPctSoFar)
strPct = '%d%%' % intPct
-
+
+
self.barProgressSync.setFormat('%p%')
if ssdm == 'BitcoindReady':
return (0,0,0.99) # because it's probably not completely done...
self.lblTimeLeftSync.setText('Almost Done...')
self.barProgressSync.setValue(99)
elif ssdm == 'BitcoindSynchronizing':
- self.barProgressSync.setValue(int(99.9*self.approxPctSoFar))
+ sdmPercent = int(99.9*self.approxPctSoFar)
if self.approxBlkLeft < 10000:
if self.approxBlkLeft < 200:
self.lblTimeLeftSync.setText('%d blocks' % self.approxBlkLeft)
- else:
+ else:
# If we're within 10k blocks, estimate based on blkspersec
if info['blkspersec'] > 0:
timeleft = int(self.approxBlkLeft/info['blkspersec'])
@@ -3757,16 +4897,20 @@ def updateSyncProgress(self):
else:
# If we're more than 10k blocks behind...
if timeRemain:
- timeRemain = min(8*HOUR, timeRemain)
+ timeRemain = min(24*HOUR, timeRemain)
self.lblTimeLeftSync.setText(secondsToHumanTime(timeRemain))
else:
self.lblTimeLeftSync.setText('')
elif ssdm == 'BitcoindInitializing':
- self.barProgressSync.setValue(0)
+ sdmPercent = 0
self.barProgressSync.setFormat('')
+ self.barProgressBuild.setFormat('')
+ self.barProgressScan.setFormat('')
else:
LOGERROR('Should not predict sync info in non init/sync SDM state')
return ('UNKNOWN','UNKNOWN', 'UNKNOWN')
+
+ self.barProgressSync.setValue(sdmPercent)
else:
LOGWARN('Called updateSyncProgress while not sync\'ing')
@@ -3774,7 +4918,7 @@ def updateSyncProgress(self):
#############################################################################
def GetDashFunctionalityText(self, func):
"""
- Outsourcing all the verbose dashboard text to here, to de-clutter the
+ Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
LOGINFO('Switching Armory functional mode to "%s"', func)
@@ -3827,12 +4971,12 @@ def GetDashFunctionalityText(self, func):
'
Create transactions with watching-only wallets, '
'to be signed by an offline wallets
'
'')
-
+
#############################################################################
def GetDashStateText(self, mgmtMode, state):
"""
- Outsourcing all the verbose dashboard text to here, to de-clutter the
+ Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
LOGINFO('Switching Armory state text to Mgmt:%s, State:%s', mgmtMode, state)
@@ -3840,28 +4984,28 @@ def GetDashStateText(self, mgmtMode, state):
# A few states don't care which mgmtMode you are in...
if state == 'NewUserInfo':
return tr("""
- For more information about Armory, and even Bitcoin itself, you should
- visit the frequently
- asked questions page. If
- you are experiencing problems using this software, please visit the
- Armory
- troubleshooting webpage. It will be updated frequently with
- solutions to common problems.
+ For more information about Armory, and even Bitcoin itself, you should
+ visit the frequently
+ asked questions page. If
+ you are experiencing problems using this software, please visit the
+ Armory
+ troubleshooting webpage. It will be updated frequently with
+ solutions to common problems.
""")
elif state == 'OnlineFull1':
return ( \
@@ -3879,7 +5023,7 @@ def GetDashStateText(self, mgmtMode, state):
'Bitcoin-Qt window if it is synchronized. If not, it is '
'recommended you close Armory and restart it only when you '
'see that checkmark.'
- '
' if not self.doManageSatoshi else '') + (
+ '
' if not self.doAutoBitcoind else '') + (
'Please backup your wallets! Armory wallets are '
'"deterministic", meaning they only need to be backed up '
'one time (unless you have imported external addresses/keys). '
@@ -3929,7 +5073,7 @@ def GetDashStateText(self, mgmtMode, state):
'http://www.bitcoin.org.')
# Branch the available display text based on which Satoshi-Management
- # mode Armory is using. It probably wasn't necessary to branch the
+ # mode Armory is using. It probably wasn't necessary to branch the
# the code like this, but it helped me organize the seemingly-endless
# number of dashboard screens I need
if mgmtMode.lower()=='user':
@@ -3950,7 +5094,7 @@ def GetDashStateText(self, mgmtMode, state):
bitconf = os.path.join(BTC_HOME_DIR, 'bitcoin.conf')
return ( \
'You are currently in offline mode because '
- 'Bitcoin-Qt is not running. To switch to online '
+ 'Bitcoin-Qt is not running. To switch to online '
'mode, start Bitcoin-Qt and let it synchronize with the network '
'-- you will see a green checkmark in the bottom-right corner when '
'it is complete. If Bitcoin-Qt is already running and you believe '
@@ -4038,20 +5182,25 @@ def GetDashStateText(self, mgmtMode, state):
'to manage it yourself, please adjust your settings and '
'restart Armory.')
if state == 'InitializingLongTime':
- return ( \
- 'To maximize your security, the Bitcoin engine is downloading '
- 'and verifying the global transaction ledger. This will take '
- 'several hours, but only needs to be done once! It is usually '
- 'best to leave it running over night for this initialization process. '
- 'Subsequent loads will only take a few minutes.'
- '
'
- 'While you wait, you can manage your wallets. Make new wallets, '
- 'make digital or paper backups, create Bitcoin addresses to receive '
- 'payments, '
- 'sign messages, and/or import private keys. You will always '
- 'receive Bitcoin payments regardless of whether you are online, '
- 'but you will have to verify that payment through another service '
- 'until Armory is finished this initialization.')
+ return tr("""
+ To maximize your security, the Bitcoin engine is downloading
+ and verifying the global transaction ledger. This will take
+ several hours, but only needs to be done once! It is
+ usually best to leave it running over night for this
+ initialization process. Subsequent loads will only take a few
+ minutes.
+
+ Please Note: Between Armory and the underlying Bitcoin
+ engine, you need to have 40-50 GB of spare disk space available
+ to hold the global transaction history.
+
+ While you wait, you can manage your wallets. Make new wallets,
+ make digital or paper backups, create Bitcoin addresses to receive
+ payments,
+ sign messages, and/or import private keys. You will always
+ receive Bitcoin payments regardless of whether you are online,
+ but you will have to verify that payment through another service
+ until Armory is finished this initialization.""")
if state == 'InitializingDoneSoon':
return ( \
'The software is downloading and processing the latest activity '
@@ -4128,43 +5277,42 @@ def GetDashStateText(self, mgmtMode, state):
soutDisp = 'StdOut: %s' % soutHtml
serrDisp = 'StdErr: %s' % serrHtml
if len(sout)>0 or len(serr)>0:
- return ( \
- 'There was an error starting the underlying Bitcoin engine. '
- 'This should not normally happen. Usually it occurs when you '
- 'have been using Bitcoin-Qt prior to using Armory, especially '
- 'if you have upgraded or downgraded Bitcoin-Qt recently (manually, '
- 'or through the Armory automatic installation). Output from '
- 'bitcoind: ' +
- (soutDisp if len(sout)>0 else '') +
+ return (tr("""
+ There was an error starting the underlying Bitcoin engine.
+ This should not normally happen. Usually it occurs when you
+ have been using Bitcoin-Qt prior to using Armory, especially
+ if you have upgraded or downgraded Bitcoin-Qt recently.
+ Output from bitcoind: """) + \
+ (soutDisp if len(sout)>0 else '') + \
(serrDisp if len(serr)>0 else '') )
else:
return ( tr("""
- There was an error starting the underlying Bitcoin engine.
- This should not normally happen. Usually it occurs when you
- have been using Bitcoin-Qt prior to using Armory, especially
- if you have upgraded or downgraded Bitcoin-Qt recently (manually,
- or through the Armory automatic installation).
+ There was an error starting the underlying Bitcoin engine.
+ This should not normally happen. Usually it occurs when you
+ have been using Bitcoin-Qt prior to using Armory, especially
+ if you have upgraded or downgraded Bitcoin-Qt recently.
- Unfortunately, this error is so strange, Armory does not
- recognize it. Please go to "Export Log File" from the "File"
+ Unfortunately, this error is so strange, Armory does not
+ recognize it. Please go to "Export Log File" from the "File"
menu and email at as an attachment to
- support@bitcoinarmory.com. We apologize for the
+ support@bitcoinarmory.com. We apologize for the
inconvenience!"""))
-
+
#############################################################################
+ @TimeThisFunction
def setDashboardDetails(self, INIT=False):
"""
We've dumped all the dashboard text into the above 2 methods in order
to declutter this method.
"""
-
- TimerStart('setDashboardDetails')
onlineAvail = self.onlineModeIsPossible()
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getBDMState()
+ tdmState = TheTDM.getTDMState()
+ descr = ''
descr1 = ''
descr2 = ''
@@ -4177,14 +5325,28 @@ def setSyncRowVisible(b):
self.lblDashModeSync.setVisible(b)
self.barProgressSync.setVisible(b)
self.lblTimeLeftSync.setVisible(b)
-
+
+
+ def setTorrentRowVisible(b):
+ self.lblDashModeTorrent.setVisible(b)
+ self.barProgressTorrent.setVisible(b)
+ self.lblTimeLeftTorrent.setVisible(b)
+ self.lblTorrentStats.setVisible(b)
+
+ def setBuildRowVisible(b):
+ self.lblDashModeBuild.setVisible(b)
+ self.barProgressBuild.setVisible(b)
+ self.lblTimeLeftBuild.setVisible(b)
+
def setScanRowVisible(b):
self.lblDashModeScan.setVisible(b)
self.barProgressScan.setVisible(b)
self.lblTimeLeftScan.setVisible(b)
def setOnlyDashModeVisible():
+ setTorrentRowVisible(False)
setSyncRowVisible(False)
+ setBuildRowVisible(False)
setScanRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
@@ -4206,15 +5368,18 @@ def setBtnFrameVisible(b, descr=''):
setOnlyDashModeVisible()
self.btnModeSwitch.setVisible(False)
- if self.doManageSatoshi and not sdmState=='BitcoindReady':
+ # This keeps popping up for some reason!
+ self.lblTorrentStats.setVisible(False)
+
+ if self.doAutoBitcoind and not sdmState=='BitcoindReady':
# User is letting Armory manage the Satoshi client for them.
-
+
if not sdmState==self.lastSDMState:
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
- # There's a whole bunch of stuff that has to be hidden/shown
+ # There's a whole bunch of stuff that has to be hidden/shown
# depending on the state... set some reasonable defaults here
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Install, False)
@@ -4222,9 +5387,9 @@ def setBtnFrameVisible(b, descr=''):
setBtnRowVisible(DASHBTNS.Instruct, False)
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnRowVisible(DASHBTNS.Close, False)
-
+
if not (self.forceOnline or self.internetAvail) or CLI_OPTIONS.offline:
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblDashModeSync.setText( 'Armory is offline', \
size=4, color='TextWarn', bold=True)
@@ -4258,9 +5423,9 @@ def setBtnFrameVisible(b, descr=''):
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
- elif not TheSDM.isRunningBitcoind():
+ elif not TheSDM.isRunningBitcoind() and not TheTDM.isRunning():
setOnlyDashModeVisible()
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.lblDashModeSync.setText( 'Armory is offline', \
size=4, color='TextWarn', bold=True)
# Bitcoind is not being managed, but we want it to be
@@ -4340,7 +5505,7 @@ def setBtnFrameVisible(b, descr=''):
self.notAvailErrorCount += 1
#if self.notAvailErrorCount < 5:
#LOGERROR('Auto-mode-switch')
- #self.pressModeSwitchButton()
+ #self.executeModeSwitch()
descr1 += ''
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
@@ -4353,33 +5518,71 @@ def setBtnFrameVisible(b, descr=''):
self.lblDashDescr2.setText(descr2)
else: # online detected/forced, and TheSDM has already been started
if sdmState in ['BitcoindWrongPassword', 'BitcoindNotAvailable']:
- setOnlyDashModeVisible()
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+
+ extraTxt = ''
+ if not self.wasSynchronizing:
+ setOnlyDashModeVisible()
+ else:
+ extraTxt = tr("""
+ Armory has lost connection to the
+ core Bitcoin software. If you did not do anything
+ that affects your network connection or the bitcoind
+ process, it will probably recover on its own in a
+ couple minutes
""")
+ self.lblTimeLeftSync.setVisible(False)
+ self.barProgressSync.setFormat('')
+
+
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
LOGINFO('Dashboard switched to auto-BadConnection')
self.lblDashModeSync.setText( 'Armory is offline', \
size=4, color='TextWarn', bold=True)
descr1 += self.GetDashStateText('Auto', 'OfflineBadConnection')
descr2 += self.GetDashFunctionalityText('Offline')
- self.lblDashDescr1.setText(descr1)
+ self.lblDashDescr1.setText(extraTxt + descr1)
self.lblDashDescr2.setText(descr2)
- elif sdmState in ['BitcoindInitializing', 'BitcoindSynchronizing']:
+ elif sdmState in ['BitcoindInitializing', \
+ 'BitcoindSynchronizing', \
+ 'TorrentSynchronizing']:
+ self.wasSynchronizing = True
LOGINFO('Dashboard switched to auto-InitSync')
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+ self.lblBusy.setVisible(True)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.updateSyncProgress()
+
+
+ # If torrent ever ran, leave it visible
setSyncRowVisible(True)
setScanRowVisible(True)
- self.lblBusy.setVisible(True)
+ setTorrentRowVisible(TheTDM.isStarted())
- if sdmState=='BitcoindInitializing':
+ if TheTDM.isRunning():
+ self.lblDashModeTorrent.setText('Downloading via Armory CDN', \
+ size=4, bold=True, color='Foreground')
+ self.lblDashModeSync.setText( 'Synchronizing with Network', \
+ size=4, bold=True, color='DisableFG')
+ self.lblTorrentStats.setVisible(True)
+ elif sdmState=='BitcoindInitializing':
+ self.lblDashModeTorrent.setText('Download via Armory CDN', \
+ size=4, bold=True, color='DisableFG')
self.lblDashModeSync.setText( 'Initializing Bitcoin Engine', \
size=4, bold=True, color='Foreground')
+ self.lblTorrentStats.setVisible(False)
else:
+ self.lblDashModeTorrent.setText('Download via Armory CDN', \
+ size=4, bold=True, color='DisableFG')
self.lblDashModeSync.setText( 'Synchronizing with Network', \
size=4, bold=True, color='Foreground')
+ self.lblTorrentStats.setVisible(False)
- self.lblDashModeScan.setText( 'Build Databases and Scan', \
+
+ self.lblDashModeBuild.setText( 'Build Databases', \
+ size=4, bold=True, color='DisableFG')
+ self.lblDashModeScan.setText( 'Scan Transaction History', \
size=4, bold=True, color='DisableFG')
- if self.approxBlkLeft > 1440: # more than 10 days
+
+ # If more than 10 days behind, or still downloading torrent
+ if tdmState=='Downloading' or self.approxBlkLeft > 1440:
descr1 += self.GetDashStateText('Auto', 'InitializingLongTime')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
else:
@@ -4391,7 +5594,7 @@ def setBtnFrameVisible(b, descr=''):
'Since version 0.88, Armory runs bitcoind in the '
'background. You can switch back to '
'the old way in the Settings dialog. ')
-
+
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
@@ -4401,7 +5604,7 @@ def setBtnFrameVisible(b, descr=''):
if bdmState in ('Offline', 'Uninitialized'):
if onlineAvail and not self.lastBDMState[1]==onlineAvail:
LOGINFO('Dashboard switched to user-OfflineOnlinePoss')
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(True)
@@ -4412,14 +5615,14 @@ def setBtnFrameVisible(b, descr=''):
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
elif not onlineAvail and not self.lastBDMState[1]==onlineAvail:
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
self.btnModeSwitch.setEnabled(False)
self.lblDashModeSync.setText( 'Armory is offline', \
size=4, color='TextWarn', bold=True)
-
+
if not self.bitcoindIsAvailable():
if self.internetAvail:
descr = self.GetDashStateText('User','OfflineNoSatoshi')
@@ -4434,14 +5637,14 @@ def setBtnFrameVisible(b, descr=''):
descr = self.GetDashStateText('User', 'OfflineNoInternet')
elif not self.checkHaveBlockfiles():
descr = self.GetDashStateText('User', 'OfflineNoBlkFiles')
-
- descr += '
'
+
+ descr += '
'
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
-
+
elif bdmState == 'BlockchainReady':
setOnlyDashModeVisible()
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, True)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
self.lblBusy.setVisible(False)
if self.netMode == NETWORKMODE.Disconnected:
self.btnModeSwitch.setVisible(False)
@@ -4453,7 +5656,7 @@ def setBtnFrameVisible(b, descr=''):
LOGINFO('Dashboard switched to online-but-dirty mode')
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setText('Rescan Now')
- self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dashboard)
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
self.lblDashModeSync.setText( 'Armory is online, but needs to rescan ' \
'the blockchain', size=4, color='TextWarn', bold=True)
if len(self.sweepAfterScanList) > 0:
@@ -4465,12 +5668,12 @@ def setBtnFrameVisible(b, descr=''):
LOGINFO('Dashboard switched to fully-online mode')
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( 'Armory is online!', color='TextGreen', size=4, bold=True)
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, True)
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
descr = self.GetDashStateText('User', 'OnlineFull1')
descr += self.GetDashFunctionalityText('Online')
descr += self.GetDashStateText('User', 'OnlineFull2')
self.lblDashDescr1.setText(descr)
- #self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dashboard)
+ #self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
elif bdmState == 'Scanning':
LOGINFO('Dashboard switched to "Scanning" mode')
self.updateSyncProgress()
@@ -4492,39 +5695,47 @@ def setBtnFrameVisible(b, descr=''):
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(False)
- if len(str(self.lblDashModeScan.text()).strip()) == 0:
- self.lblDashModeScan.setText( 'Preparing Databases', \
+ if len(str(self.lblDashModeBuild.text()).strip()) == 0:
+ self.lblDashModeBuild.setText( 'Preparing Databases', \
size=4, bold=True, color='Foreground')
- self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Transactions, False)
-
+
+ if len(str(self.lblDashModeScan.text()).strip()) == 0:
+ self.lblDashModeScan.setText( 'Scan Transaction History', \
+ size=4, bold=True, color='DisableFG')
+
+ self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
+
if len(self.walletMap)==0:
descr = self.GetDashStateText('User','ScanNoWallets')
else:
descr = self.GetDashStateText('User','ScanWithWallets')
-
- descr += self.GetDashStateText('Auto', 'NewUserInfo')
+
+ descr += self.GetDashStateText('Auto', 'NewUserInfo')
descr += self.GetDashFunctionalityText('Scanning') + ' '
self.lblDashDescr1.setText(descr)
self.lblDashDescr2.setText('')
- self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dashboard)
+ self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
else:
LOGERROR('What the heck blockchain mode are we in? %s', bdmState)
-
+
self.lastBDMState = [bdmState, onlineAvail]
self.lastSDMState = sdmState
- self.lblDashModeSync.setContentsMargins(50,5,50,5)
- self.lblDashModeScan.setContentsMargins(50,5,50,5)
+ self.lblDashModeTorrent.setContentsMargins( 50,5,50,5)
+ self.lblDashModeSync.setContentsMargins( 50,5,50,5)
+ self.lblDashModeBuild.setContentsMargins(50,5,50,5)
+ self.lblDashModeScan.setContentsMargins( 50,5,50,5)
vbar = self.dashScrollArea.verticalScrollBar()
- vbar.setValue(vbar.minimum())
-
- TimerStop('setDashboardDetails')
-
-
+
+ # On Macs, this causes the main window scroll area to keep bouncing back
+ # to the top. Not setting the value seems to fix it. DR - 2014/02/12
+ if not OS_MACOSX:
+ vbar.setValue(vbar.minimum())
+
#############################################################################
def createToolTipWidget(self, tiptext, iconSz=2):
"""
- The is to signal to Qt that it should be interpretted as HTML/Rich
- text even if no HTML tags are used. This appears to be necessary for Qt
+ The is to signal to Qt that it should be interpretted as HTML/Rich
+ text even if no HTML tags are used. This appears to be necessary for Qt
to wrap the tooltip text
"""
fgColor = htmlColor('ToolTipQ')
@@ -4532,68 +5743,43 @@ def createToolTipWidget(self, tiptext, iconSz=2):
lbl.setToolTip('' + tiptext)
lbl.setMaximumWidth(relaxedSizeStr(lbl, '(?)')[0])
def pressEv(ev):
- DlgTooltip(self, lbl, tiptext).exec_()
+ QWhatsThis.showText(ev.globalPos(), tiptext, self)
lbl.mousePressEvent = pressEv
return lbl
- #############################################################################
- def checkSatoshiVersion(self):
- timeAlive = long(RightNow()) - self.bornOnTime
- if not CLI_OPTIONS.skipVerCheck and \
- (timeAlive%900==0 or self.satoshiLatestVer==None):
- try:
- # Will eventually make a specially-signed file just for this
- # kind of information. For now, it's all in the versions.txt
- if not self.netMode==NETWORKMODE.Full or \
- not 'SATOSHI' in self.latestVer or \
- not 'SATOSHI' in self.downloadDict or \
- self.NetworkingFactory.proto==None:
- return
-
- LOGDEBUG('Checking Satoshi Version')
- self.checkForLatestVersion()
-
- self.satoshiLatestVer = self.latestVer['SATOSHI']
- self.satoshiLatestVer = readVersionString(self.satoshiLatestVer)
- latestVerInt = getVersionInt(self.satoshiLatestVer)
- peerVersion = self.NetworkingFactory.proto.peerInfo['subver']
- peerVersion = peerVersion.split(':')[-1][:-1]
- peerVerInt = getVersionInt(readVersionString(peerVersion))
+ #############################################################################
+ @TimeThisFunction
+ def checkNewZeroConf(self):
+ while len(self.newZeroConfSinceLastUpdate)>0:
+ rawTx = self.newZeroConfSinceLastUpdate.pop()
+ for wltID in self.walletMap.keys():
+ wlt = self.walletMap[wltID]
+ le = wlt.cppWallet.calcLedgerEntryForTxStr(rawTx)
+ if not le.getTxHash() == '\x00' * 32:
+ LOGDEBUG('ZerConf tx for wallet: %s. Adding to notify queue.' % wltID)
+ notifyIn = self.getSettingOrSetDefault('NotifyBtcIn', not OS_MACOSX)
+ notifyOut = self.getSettingOrSetDefault('NotifyBtcOut', not OS_MACOSX)
+ if (le.getValue() <= 0 and notifyOut) or (le.getValue() > 0 and notifyIn):
+ self.notifyQueue.append([wltID, le, False]) # notifiedAlready=False
+ self.createCombinedLedger()
+ self.walletModel.reset()
- self.satoshiLatestVer = '0.0'
+ #############################################################################
+ @TimeThisFunction
+ def newBlockSyncRescanZC(self, prevLedgSize):
+ didAffectUs = False
+ for wltID in self.walletMap.keys():
+ self.walletMap[wltID].syncWithBlockchainLite()
+ TheBDM.rescanWalletZeroConf(self.walletMap[wltID].cppWallet)
+ newLedgerSize = len(self.walletMap[wltID].getTxLedger())
+ didAffectUs = prevLedgSize[wltID] != newLedgerSize
- LOGINFO('Satoshi Version: Curr: %d, Latest: %d', peerVerInt, latestVerInt)
+ return didAffectUs
- if latestVerInt>peerVerInt and not self.satoshiVerWarnAlready:
- LOGINFO('New version available!')
- self.satoshiVerWarnAlready = True
- doUpgrade = QMessageBox.warning(self, 'Updates Available', \
- 'There is a new version of the Bitcoin software available. '
- 'Newer version usually contain important security updates '
- 'so it is best to upgrade as soon as possible.'
- '
'
- 'Would you like to upgrade the Bitcoin software?' % \
- (peerVersion, self.latestVer['SATOSHI']) , \
- QMessageBox.Yes | QMessageBox.No)
- if doUpgrade==QMessageBox.Yes:
- TheSDM.stopBitcoind()
- self.setDashboardDetails()
- self.installSatoshiClient(closeWhenDone=True)
-
-
- return
- except:
- LOGEXCEPT('Error in checkSatoshiVersion')
-
-
-
-
+ #############################################################################
#############################################################################
def Heartbeat(self, nextBeatSec=1):
"""
@@ -4603,13 +5789,13 @@ def Heartbeat(self, nextBeatSec=1):
"""
# Special heartbeat functions are for special windows that may need
- # to update every, say, every 0.1s
+ # to update every, say, every 0.1s
# is all that matters at that moment, like a download progress window.
# This is "special" because you are putting all other processing on
# hold while this special window is active
# IMPORTANT: Make sure that the special heartbeat function returns
# a value below zero when it's done OR if it errors out!
- # Otherwise, it should return the next heartbeat delay,
+ # Otherwise, it should return the next heartbeat delay,
# which would probably be something like 0.1 for a rapidly
# updating progress counter
for fn in self.extraHeartbeatSpecial:
@@ -4625,21 +5811,70 @@ def Heartbeat(self, nextBeatSec=1):
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
return
-
+
+ # TorrentDownloadManager
+ # SatoshiDaemonManager
+ # BlockDataManager
+ tdmState = TheTDM.getTDMState()
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getBDMState()
#print '(SDM, BDM) State = (%s, %s)' % (sdmState, bdmState)
+ self.processAnnounceData()
+
try:
for func in self.extraHeartbeatAlways:
- func()
-
+ if isinstance(func, list):
+ fnc = func[0]
+ kargs = func[1]
+ keep_running = func[2]
+ if keep_running == False:
+ self.extraHeartbeatAlways.remove(func)
+ fnc(*kargs)
+ else:
+ func()
+
for idx,wltID in enumerate(self.walletIDList):
self.walletMap[wltID].checkWalletLockTimeout()
-
- if self.doManageSatoshi:
+
+
+
+ if self.doAutoBitcoind:
+ if TheTDM.isRunning():
+ if tdmState=='Downloading':
+ self.updateSyncProgress()
+
+ downRate = TheTDM.getLastStats('downRate')
+ self.torrentCircBuffer.append(downRate if downRate else 0)
+
+ # Assumes 1 sec heartbeat
+ bufsz = len(self.torrentCircBuffer)
+ if bufsz > 5*MINUTE:
+ self.torrentCircBuffer = self.torrentCircBuffer[1:]
+
+ if bufsz >= 4.99*MINUTE:
+ # If dlrate is below 30 kB/s, offer the user a way to skip it
+ avgDownRate = sum(self.torrentCircBuffer) / float(bufsz)
+ if avgDownRate < 30*KILOBYTE:
+ if (RightNow() - self.lastAskedUserStopTorrent) > 5*MINUTE:
+ self.lastAskedUserStopTorrent = RightNow()
+ reply = QMessageBox.warning(self, tr('Torrent'), tr("""
+ Armory is attempting to use BitTorrent to speed up
+ the initial synchronization, but it appears to be
+ downloading slowly or not at all.
+
+ If the torrent engine is not starting properly,
+ or is not downloading
+ at a reasonable speed for your internet connection,
+ you should disable it in
+ File\xe2\x86\x92Settings and then
+ restart Armory."""), QMessageBox.Ok)
+
+ # For now, just show once then disable
+ self.lastAskedUserStopTorrent = UINT64_MAX
+
if sdmState in ['BitcoindInitializing','BitcoindSynchronizing']:
self.updateSyncProgress()
elif sdmState == 'BitcoindReady':
@@ -4649,10 +5884,10 @@ def Heartbeat(self, nextBeatSec=1):
elif bdmState == 'Offline':
LOGERROR('Bitcoind is ready, but we are offline... ?')
elif bdmState=='Scanning':
- self.checkSatoshiVersion()
self.updateSyncProgress()
- if not sdmState==self.lastSDMState or not bdmState==self.lastBDMState[0]:
+ if not sdmState==self.lastSDMState or \
+ not bdmState==self.lastBDMState[0]:
self.setDashboardDetails()
else:
if bdmState in ('Offline','Uninitialized'):
@@ -4662,7 +5897,6 @@ def Heartbeat(self, nextBeatSec=1):
self.setDashboardDetails()
return
elif bdmState=='Scanning':
- self.checkSatoshiVersion()
self.updateSyncProgress()
@@ -4674,7 +5908,7 @@ def Heartbeat(self, nextBeatSec=1):
self.setDashboardDetails()
self.dirtyLastTime = TheBDM.isDirty()
-
+
if bdmState=='BlockchainReady':
#####
@@ -4684,7 +5918,7 @@ def Heartbeat(self, nextBeatSec=1):
self.finishLoadBlockchain()
self.needUpdateAfterScan = False
self.setDashboardDetails()
-
+
#####
# If we just rescanned to sweep an address, need to finish it
if len(self.sweepAfterScanList)>0:
@@ -4709,34 +5943,14 @@ def Heartbeat(self, nextBeatSec=1):
# Now we start the normal array of heartbeat operations
- self.checkSatoshiVersion() # this actually only checks every 15 min
newBlocks = TheBDM.readBlkFileUpdate(wait=True)
self.currBlockNum = TheBDM.getTopBlockHeight()
+ if isinstance(self.currBlockNum, int): BDMcurrentBlock[0] = self.currBlockNum
- #####
- # If we are getting lots of blocks, very rapidly, issue a warning
- # We look at a rolling sum of the last 5 heartbeat updates (5s)
if not newBlocks:
newBlocks = 0
- self.detectNotSyncQ.insert(0, newBlocks)
- self.detectNotSyncQ.pop()
- blksInLast5sec = sum(self.detectNotSyncQ)
- if( blksInLast5sec>20 ):
- LOGERROR('Detected Bitcoin-Qt/bitcoind not synchronized')
- LOGERROR('New blocks added in last 5 sec: %d', blksInLast5sec)
- if self.noSyncWarnYet:
- self.noSyncWarnYet = False
- QMessageBox.warning(self,'Bitcoin-Qt is not synchronized', \
- 'Armory has detected that Bitcoin-Qt is not synchronized '
- 'with the bitcoin network yet, and Armory may not '
- 'work properly. If you experience any unusual behavior, it is '
- 'recommended that you close Armory and only restart it '
- 'when you see the green checkmark in the bottom-right '
- 'corner of the Bitcoin-Qt window.', QMessageBox.Ok)
- return
-
-
-
+
+
# If we have new zero-conf transactions, scan them and update ledger
if len(self.newZeroConfSinceLastUpdate)>0:
self.newZeroConfSinceLastUpdate.reverse()
@@ -4744,30 +5958,13 @@ def Heartbeat(self, nextBeatSec=1):
wlt = self.walletMap[wltID]
TheBDM.rescanWalletZeroConf(wlt.cppWallet, wait=True)
- while len(self.newZeroConfSinceLastUpdate)>0:
- TimerStart('CheckNewZeroConf')
- # For each new tx, check each wallet
- rawTx = self.newZeroConfSinceLastUpdate.pop()
- for wltID in self.walletMap.keys():
- wlt = self.walletMap[wltID]
- le = wlt.cppWallet.calcLedgerEntryForTxStr(rawTx)
- if not le.getTxHash()=='\x00'*32:
- LOGDEBUG('ZerConf tx for wallet: %s. Adding to notify queue.' % wltID)
- notifyIn = self.getSettingOrSetDefault('NotifyBtcIn', not OS_MACOSX)
- notifyOut = self.getSettingOrSetDefault('NotifyBtcOut', not OS_MACOSX)
- if (le.getValue()<=0 and notifyOut) or (le.getValue()>0 and notifyIn):
- self.notifyQueue.append([wltID, le, False]) # notifiedAlready=False
- self.createCombinedLedger()
- self.walletModel.reset()
- TimerStop('CheckNewZeroConf')
-
+ self.checkNewZeroConf()
+
# Trigger any notifications, if we have them...
- TimerStart('doSystemTrayThing')
self.doTheSystemTrayThing()
- TimerStop('doSystemTrayThing')
if newBlocks>0 and not TheBDM.isDirty():
-
+
# This says "after scan", but works when new blocks appear, too
TheBDM.updateWalletsAfterScan(wait=True)
@@ -4780,58 +5977,50 @@ def Heartbeat(self, nextBeatSec=1):
LOGINFO('New Block! : %d', self.currBlockNum)
didAffectUs = False
-
+
# LITE sync means it won't rescan if addresses have been imported
- TimerStart('newBlockSyncRescanZC')
- for wltID in self.walletMap.keys():
- self.walletMap[wltID].syncWithBlockchainLite()
- TheBDM.rescanWalletZeroConf(self.walletMap[wltID].cppWallet)
- newLedgerSize = len(self.walletMap[wltID].getTxLedger())
- didAffectUs = (prevLedgSize[wltID] != newLedgerSize)
- TimerStop('newBlockSyncRescanZC')
-
+ didAffectUs = self.newBlockSyncRescanZC(prevLedgSize)
+
if didAffectUs:
LOGINFO('New Block contained a transaction relevant to us!')
self.walletListChanged()
self.notifyOnSurpriseTx(self.currBlockNum-newBlocks, \
self.currBlockNum+1)
-
+
self.createCombinedLedger()
self.blkReceived = RightNow()
self.writeSetting('LastBlkRecvTime', self.blkReceived)
self.writeSetting('LastBlkRecv', self.currBlockNum)
-
+
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', self.currBlockNum)
self.lblArmoryStatus.setText(\
'Connected (%s blocks) ' % \
(htmlColor('TextGreen'), self.currBlockNum))
-
+
# Update the wallet view to immediately reflect new balances
- TimerStart('walletModelReset')
self.walletModel.reset()
- TimerStop('walletModelReset')
-
+
blkRecvAgo = RightNow() - self.blkReceived
#blkStampAgo = RightNow() - TheBDM.getTopBlockHeader().getTimestamp()
self.lblArmoryStatus.setToolTip('Last block received is %s ago' % \
secondsToHumanTime(blkRecvAgo))
-
-
+
+
for func in self.extraHeartbeatOnline:
func()
-
-
+
+
except:
LOGEXCEPT('Error in heartbeat function')
print sys.exc_info()
finally:
reactor.callLater(nextBeatSec, self.Heartbeat)
-
+
#############################################################################
def notifyOnSurpriseTx(self, blk0, blk1):
- # We usually see transactions as zero-conf first, then they show up in
+ # We usually see transactions as zero-conf first, then they show up in
# a block. It is a "surprise" when the first time we see it is in a block
notifiedAlready = set([ n[1].getTxHash() for n in self.notifyQueue ])
for blk in range(blk0, blk1):
@@ -4845,14 +6034,15 @@ def notifyOnSurpriseTx(self, blk0, blk1):
self.notifyQueue.append([wltID, le, False])
else:
pass
-
-
+
+
#############################################################################
+ @TimeThisFunction
def doTheSystemTrayThing(self):
"""
I named this method as it is because this is not just "show a message."
- I need to display all relevant transactions, in sequence that they were
+ I need to display all relevant transactions, in sequence that they were
received. I will store them in self.notifyQueue, and this method will
do nothing if it's empty.
"""
@@ -4860,7 +6050,7 @@ def doTheSystemTrayThing(self):
RightNow()')
dispLines.append( 'Wallet:\t"%s" (%s)' % (wlt.labelName, wltID))
@@ -4914,8 +6105,8 @@ def doTheSystemTrayThing(self):
totalStr = coin2str( sum([other[i][1] for i in range(len(other))]), maxZeros=1)
dispLines.append( 'Amount: \t%s BTC' % totalStr.strip())
if len(other)==1:
- dispLines.append('Sent To:\t%s' % hash160_to_addrStr(other[0][0]))
- addrComment = wlt.getComment(other[0][0])
+ dispLines.append('Sent To:\t%s' % other[0][0])
+ addrComment = wlt.getComment(addrStr_to_hash160(other[0][0])[1])
else:
dispLines.append('')
dispLines.append('From:\tWallet "%s" (%s)' % (wlt.labelName, wltID))
@@ -4924,14 +6115,16 @@ def doTheSystemTrayThing(self):
'\n'.join(dispLines), \
QSystemTrayIcon.Information, \
10000)
+ LOGINFO(title)
+ #LOGINFO('\n' + '\n'.join(dispLines))
#qsnd = QSound('drip.wav')
#qsnd.play()
self.notifyBlockedUntil = RightNow() + 5
return
-
-
-
+
+
+
#############################################################################
def closeEvent(self, event=None):
moc = self.getSettingOrSetDefault('MinimizeOrClose', 'DontKnow')
@@ -4963,11 +6156,54 @@ def closeEvent(self, event=None):
+ #############################################################################
+ def unpackLinuxTarGz(self, targzFile, changeSettings=True):
+ if targzFile is None:
+ return None
+
+ if not os.path.exists(targzFile):
+ return None
+
+ unpackDir = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInst')
+ unpackDir2 = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInstOld')
+ if os.path.exists(unpackDir):
+ if os.path.exists(unpackDir2):
+ shutil.rmtree(unpackDir2)
+ shutil.move(unpackDir, unpackDir2)
+
+ os.mkdir(unpackDir)
+
+ out,err = execAndWait('tar -zxf %s -C %s' % (targzFile, unpackDir), \
+ timeout=5)
+
+ LOGINFO('UNPACK STDOUT: "' + out + '"')
+ LOGINFO('UNPACK STDERR: "' + err + '"')
+
+
+ # There should only be one subdir
+ unpackDirChild = None
+ for fn in os.listdir(unpackDir):
+ unpackDirChild = os.path.join(unpackDir, fn)
+
+ if unpackDirChild is None:
+ LOGERROR('There was apparently an error unpacking the file')
+ return None
+
+ finalDir = os.path.abspath(unpackDirChild)
+ LOGWARN('Bitcoin Core unpacked into: %s', finalDir)
+
+ if changeSettings:
+ self.settings.set('SatoshiExe', finalDir)
+
+ return finalDir
+
+
+
#############################################################################
def closeForReal(self, event=None):
'''
- Seriously, I could not figure out how to exit gracefully, so the next
- best thing is to just hard-kill the app with a sys.exit() call. Oh well...
+ Unlike File->Quit or clicking the X on the window, which may actually
+ minimize Armory, this method is for *really* closing Armory
'''
try:
# Save the main window geometry in the settings file
@@ -4982,26 +6218,10 @@ def closeForReal(self, event=None):
LOGINFO('BDM is safe for clean shutdown')
TheBDM.execCleanShutdown(wait=True)
- # This will do nothing if bitcoind isn't running.
+ # This will do nothing if bitcoind isn't running.
TheSDM.stopBitcoind()
-
- # Mostly for my own use, I'm curious how fast various things run
- if CLI_OPTIONS.doDebug:
- SaveTimingsCSV( os.path.join(ARMORY_HOME_DIR, 'timings.csv') )
- except:
- # Don't want a strange error here interrupt shutdown
- LOGEXCEPT('Strange error during shutdown')
-
- try:
- if self.doHardReset:
- rebuildFile = os.path.join(ARMORY_HOME_DIR, 'rebuild.txt')
- touchFile(rebuildFile)
- os.remove(self.settingsPath)
- mempoolfile = os.path.join(ARMORY_HOME_DIR, 'mempool.bin')
- if os.path.exists(mempoolfile):
- os.remove(mempoolfile)
except:
- # Don't want a strange error here interrupt shutdown
+ # Don't want a strange error here interrupt shutdown
LOGEXCEPT('Strange error during shutdown')
@@ -5011,15 +6231,227 @@ def closeForReal(self, event=None):
reactor.stop()
if event:
event.accept()
+
+
+
+ #############################################################################
+ def execTrigger(self, toSpawn):
+ super(ArmoryDialog, toSpawn).exec_()
+
+
+ #############################################################################
+ def initTrigger(self, toInit):
+ if isinstance(toInit, DlgProgress):
+ toInit.setup(self)
+ toInit.status = 1
+
+
+ #############################################################################
+ def checkForNegImports(self):
+ negativeImports = []
+
+ for wlt in self.walletMap:
+ if self.walletMap[wlt].hasNegativeImports:
+ negativeImports.append(self.walletMap[wlt].uniqueIDB58)
+
+ # If we detect any negative import
+ if len(negativeImports) > 0:
+ logDirs = []
+ for wltID in negativeImports:
+ if not wltID in self.walletMap:
+ continue
+
+ homedir = os.path.dirname(self.walletMap[wltID].walletPath)
+ wltlogdir = os.path.join(homedir, wltID)
+ if not os.path.exists(wltlogdir):
+ continue
+
+ for subdirname in os.listdir(wltlogdir):
+ subdirpath = os.path.join(wltlogdir, subdirname)
+ logDirs.append([wltID, subdirpath])
+
+
+ DlgInconsistentWltReport(self, self, logDirs).exec_()
+
+
+ #############################################################################
+ def getAllRecoveryLogDirs(self, wltIDList):
+ self.logDirs = []
+ for wltID in wltIDList:
+ if not wltID in self.walletMap:
+ continue
+
+ homedir = os.path.dirname(self.walletMap[wltID].walletPath)
+ logdir = os.path.join(homedir, wltID)
+ if not os.path.exists(logdir):
+ continue
+
+ self.logDirs.append([wltID, logdir])
+
+ return self.logDirs
+
+ #############################################################################
+ @AllowAsync
+ def CheckWalletConsistency(self, wallets, prgAt=None):
+
+ if prgAt:
+ totalSize = 0
+ walletSize = {}
+ for wlt in wallets:
+ statinfo = os.stat(wallets[wlt].walletPath)
+ walletSize[wlt] = statinfo.st_size
+ totalSize = totalSize + statinfo.st_size
+
+ i=0
+ dlgrdy = [0]
+ nerrors = 0
+
+ for wlt in wallets:
+ if prgAt:
+ prgAt[0] = i
+ f = 10000*walletSize[wlt]/totalSize
+ prgAt[1] = f
+ i = f +i
+
+ self.wltCstStatus = WalletConsistencyCheck(wallets[wlt], prgAt)
+ if self.wltCstStatus[0] != 0:
+ self.WltCstError(wallets[wlt], self.wltCstStatus[1], dlgrdy)
+ while not dlgrdy[0]:
+ time.sleep(0.01)
+ nerrors = nerrors +1
+
+ prgAt[2] = 1
+
+ dlgrdy[0] = 0
+ while prgAt[2] != 2:
+ time.sleep(0.1)
+ if nerrors == 0:
+ self.emit(SIGNAL('UWCS'), [1, 'All wallets are consistent', 10000, dlgrdy])
+ self.emit(SIGNAL('checkForNegImports'))
+ else:
+ while not dlgrdy:
+ self.emit(SIGNAL('UWCS'), [1, 'Consistency Check Failed!', 0, dlgrdy])
+ time.sleep(1)
+
+ self.checkRdyForFix()
+
+
+ def checkRdyForFix(self):
+ #check BDM first
+ time.sleep(1)
+ self.dlgCptWlt.emit(SIGNAL('Show'))
+ while 1:
+ if TheBDM.getBDMState() == 'Scanning':
+ canFix = tr("""
+ The wallet analysis tool will become available
+ as soon as Armory is done loading. You can close this
+ window and it will reappear when ready.""")
+ self.dlgCptWlt.UpdateCanFix([canFix])
+ time.sleep(1)
+ elif TheBDM.getBDMState() == 'Offline' or \
+ TheBDM.getBDMState() == 'Uninitialized':
+ TheSDM.setDisabled(True)
+ CLI_OPTIONS.offline = True
+ break
+ else:
+ break
+
+ #check running dialogs
+ self.dlgCptWlt.emit(SIGNAL('Show'))
+ runningList = []
+ while 1:
+ listchanged = 0
+ canFix = []
+ for dlg in runningList:
+ if dlg not in runningDialogsList:
+ runningList.remove(dlg)
+ listchanged = 1
+
+ for dlg in runningDialogsList:
+ if not isinstance(dlg, DlgCorruptWallet):
+ if dlg not in runningList:
+ runningList.append(dlg)
+ listchanged = 1
+
+ if len(runningList):
+ if listchanged:
+ canFix.append(tr("""
+ The following windows need closed before you can
+ run the wallet analysis tool:"""))
+ canFix.extend([str(myobj.windowTitle()) for myobj in runningList])
+ self.dlgCptWlt.UpdateCanFix(canFix)
+ time.sleep(0.2)
+ else:
+ break
+
+
+ canFix.append('Ready to analyze inconsistent wallets!')
+ self.dlgCptWlt.UpdateCanFix(canFix, True)
+ self.dlgCptWlt.exec_()
+
+ def checkWallets(self):
+ nwallets = len(self.walletMap)
+
+ if nwallets > 0:
+ self.prgAt = [0, 0, 0]
+
+ self.pbarWalletProgress = QProgressBar()
+ self.pbarWalletProgress.setMaximum(10000)
+ self.pbarWalletProgress.setMaximumSize(300, 22)
+ self.pbarWalletProgress.setStyleSheet('text-align: center; margin-bottom: 2px; margin-left: 10px;')
+ self.pbarWalletProgress.setFormat('Wallet Consistency Check: %p%')
+ self.pbarWalletProgress.setValue(0)
+ self.statusBar().addWidget(self.pbarWalletProgress)
+
+ self.connect(self, SIGNAL('UWCS'), self.UpdateWalletConsistencyStatus)
+ self.connect(self, SIGNAL('PWCE'), self.PromptWltCstError)
+ self.CheckWalletConsistency(self.walletMap, self.prgAt, async=True)
+ self.UpdateConsistencyCheckMessage(async = True)
+ #self.extraHeartbeatAlways.append(self.UpdateWalletConsistencyPBar)
+
+ @AllowAsync
+ def UpdateConsistencyCheckMessage(self):
+ while self.prgAt[2] == 0:
+ self.emit(SIGNAL('UWCS'), [0, self.prgAt[0]])
+ time.sleep(0.5)
+
+ self.emit(SIGNAL('UWCS'), [2])
+ self.prgAt[2] = 2
+
+ def UpdateWalletConsistencyStatus(self, msg):
+ if msg[0] == 0:
+ self.pbarWalletProgress.setValue(msg[1])
+ elif msg[0] == 1:
+ self.statusBar().showMessage(msg[1], msg[2])
+ msg[3][0] = 1
+ else:
+ self.pbarWalletProgress.hide()
+
+ def WltCstError(self, wlt, status, dlgrdy):
+ self.emit(SIGNAL('PWCE'), dlgrdy, wlt, status)
+ LOGERROR('Wallet consistency check failed! (%s)', wlt.uniqueIDB58)
+
+ def PromptWltCstError(self, dlgrdy, wallet=None, status='', mode=None):
+ if not self.dlgCptWlt:
+ self.dlgCptWlt = DlgCorruptWallet(wallet, status, self, self)
+ dlgrdy[0] = 1
+ else:
+ self.dlgCptWlt.addStatus(wallet, status)
+
+ if not mode:
+ self.dlgCptWlt.show()
+ else:
+ self.dlgCptWlt.exec_()
+
############################################
class ArmoryInstanceListener(Protocol):
def connectionMade(self):
LOGINFO('Another Armory instance just tried to open.')
self.factory.func_conn_made()
-
+
def dataReceived(self, data):
LOGINFO('Received data from alternate Armory instance')
self.factory.func_recv_data(data)
@@ -5062,7 +6494,7 @@ def checkForAlreadyOpen():
############################################
def checkForAlreadyOpenError():
LOGINFO('Already open error checking')
- # Sometimes in Windows, Armory actually isn't open, because it holds
+ # Sometimes in Windows, Armory actually isn't open, because it holds
# onto the socket even after it's closed.
armoryExists = []
bitcoindExists = []
@@ -5079,7 +6511,7 @@ def checkForAlreadyOpenError():
if len(armoryExists)>0:
LOGINFO('Not an error! Armory really is open')
- return
+ return
elif len(bitcoindExists)>0:
# Strange condition where bitcoind doesn't get killed by Armory/guardian
# (I've only seen this happen on windows, though)
@@ -5088,7 +6520,7 @@ def checkForAlreadyOpenError():
killProcess(pid)
time.sleep(0.5)
raise
-
+
############################################
if 1:
@@ -5125,7 +6557,7 @@ def endProgram():
reactor.threadpool.stop()
QAPP.quit()
os._exit(0)
-
+
QAPP.connect(form, SIGNAL("lastWindowClosed()"), endProgram)
reactor.addSystemEventTrigger('before', 'shutdown', endProgram)
QAPP.setQuitOnLastWindowClosed(True)
@@ -5133,3 +6565,5 @@ def endProgram():
os._exit(QAPP.exec_())
+
+
diff --git a/ArmorySetup.nsi b/ArmorySetup.nsi
index 93c68227f..a88484031 100644
--- a/ArmorySetup.nsi
+++ b/ArmorySetup.nsi
@@ -52,7 +52,8 @@ Var StartMenuGroup
!insertmacro MUI_LANGUAGE English
# Installer attributes
-OutFile ArmorySetup-${VERSION}-beta_win32.exe
+# Default to -testing to match 90% of builds. Manually change actual releases
+OutFile armory_${VERSION}-testing_winAll.exe
InstallDir "$PROGRAMFILES\Armory"
CRCCheck on
XPStyle on
diff --git a/BitTornado/BT1/Choker.py b/BitTornado/BT1/Choker.py
new file mode 100644
index 000000000..be4017418
--- /dev/null
+++ b/BitTornado/BT1/Choker.py
@@ -0,0 +1,128 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BitTornado.clock import clock
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+class Choker:
+ def __init__(self, config, schedule, picker, done = lambda: False):
+ self.config = config
+ self.round_robin_period = config['round_robin_period']
+ self.schedule = schedule
+ self.picker = picker
+ self.connections = []
+ self.last_preferred = 0
+ self.last_round_robin = clock()
+ self.done = done
+ self.super_seed = False
+ self.paused = False
+ schedule(self._round_robin, 5)
+
+ def set_round_robin_period(self, x):
+ self.round_robin_period = x
+
+ def _round_robin(self):
+ self.schedule(self._round_robin, 5)
+ if self.super_seed:
+ cons = range(len(self.connections))
+ to_close = []
+ count = self.config['min_uploads']-self.last_preferred
+ if count > 0: # optimization
+ shuffle(cons)
+ for c in cons:
+ i = self.picker.next_have(self.connections[c], count > 0)
+ if i is None:
+ continue
+ if i < 0:
+ to_close.append(self.connections[c])
+ continue
+ self.connections[c].send_have(i)
+ count -= 1
+ for c in to_close:
+ c.close()
+ if self.last_round_robin + self.round_robin_period < clock():
+ self.last_round_robin = clock()
+ for i in xrange(1, len(self.connections)):
+ c = self.connections[i]
+ u = c.get_upload()
+ if u.is_choked() and u.is_interested():
+ self.connections = self.connections[i:] + self.connections[:i]
+ break
+ self._rechoke()
+
+ def _rechoke(self):
+ preferred = []
+ maxuploads = self.config['max_uploads']
+ if self.paused:
+ for c in self.connections:
+ c.get_upload().choke()
+ return
+ if maxuploads > 1:
+ for c in self.connections:
+ u = c.get_upload()
+ if not u.is_interested():
+ continue
+ if self.done():
+ r = u.get_rate()
+ else:
+ d = c.get_download()
+ r = d.get_rate()
+ if r < 1000 or d.is_snubbed():
+ continue
+ preferred.append((-r, c))
+ self.last_preferred = len(preferred)
+ preferred.sort()
+ del preferred[maxuploads-1:]
+ preferred = [x[1] for x in preferred]
+ count = len(preferred)
+ hit = False
+ to_unchoke = []
+ for c in self.connections:
+ u = c.get_upload()
+ if c in preferred:
+ to_unchoke.append(u)
+ else:
+ if count < maxuploads or not hit:
+ to_unchoke.append(u)
+ if u.is_interested():
+ count += 1
+ hit = True
+ else:
+ u.choke()
+ for u in to_unchoke:
+ u.unchoke()
+
+ def connection_made(self, connection, p = None):
+ if p is None:
+ p = randrange(-2, len(self.connections) + 1)
+ self.connections.insert(max(p, 0), connection)
+ self._rechoke()
+
+ def connection_lost(self, connection):
+ self.connections.remove(connection)
+ self.picker.lost_peer(connection)
+ if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
+ self._rechoke()
+
+ def interested(self, connection):
+ if not connection.get_upload().is_choked():
+ self._rechoke()
+
+ def not_interested(self, connection):
+ if not connection.get_upload().is_choked():
+ self._rechoke()
+
+ def set_super_seed(self):
+ while self.connections: # close all connections
+ self.connections[0].close()
+ self.picker.set_superseed()
+ self.super_seed = True
+
+ def pause(self, flag):
+ self.paused = flag
+ self._rechoke()
diff --git a/BitTornado/BT1/Connecter.py b/BitTornado/BT1/Connecter.py
new file mode 100644
index 000000000..e668c02fc
--- /dev/null
+++ b/BitTornado/BT1/Connecter.py
@@ -0,0 +1,288 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.bitfield import Bitfield
+from BitTornado.clock import clock
+from binascii import b2a_hex
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+def toint(s):
+ return long(b2a_hex(s), 16)
+
+def tobinary(i):
+ return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
+ chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+CHOKE = chr(0)
+UNCHOKE = chr(1)
+INTERESTED = chr(2)
+NOT_INTERESTED = chr(3)
+# index
+HAVE = chr(4)
+# index, bitfield
+BITFIELD = chr(5)
+# index, begin, length
+REQUEST = chr(6)
+# index, begin, piece
+PIECE = chr(7)
+# index, begin, piece
+CANCEL = chr(8)
+
+class Connection:
+ def __init__(self, connection, connecter):
+ self.connection = connection
+ self.connecter = connecter
+ self.got_anything = False
+ self.next_upload = None
+ self.outqueue = []
+ self.partial_message = None
+ self.download = None
+ self.send_choke_queued = False
+ self.just_unchoked = None
+
+ def get_ip(self, real=False):
+ return self.connection.get_ip(real)
+
+ def get_id(self):
+ return self.connection.get_id()
+
+ def get_readable_id(self):
+ return self.connection.get_readable_id()
+
+ def close(self):
+ if DEBUG:
+ print 'connection closed'
+ self.connection.close()
+
+ def is_locally_initiated(self):
+ return self.connection.is_locally_initiated()
+
+ def send_interested(self):
+ self._send_message(INTERESTED)
+
+ def send_not_interested(self):
+ self._send_message(NOT_INTERESTED)
+
+ def send_choke(self):
+ if self.partial_message:
+ self.send_choke_queued = True
+ else:
+ self._send_message(CHOKE)
+ self.upload.choke_sent()
+ self.just_unchoked = 0
+
+ def send_unchoke(self):
+ if self.send_choke_queued:
+ self.send_choke_queued = False
+ if DEBUG:
+ print 'CHOKE SUPPRESSED'
+ else:
+ self._send_message(UNCHOKE)
+ if ( self.partial_message or self.just_unchoked is None
+ or not self.upload.interested or self.download.active_requests ):
+ self.just_unchoked = 0
+ else:
+ self.just_unchoked = clock()
+
+ def send_request(self, index, begin, length):
+ self._send_message(REQUEST + tobinary(index) +
+ tobinary(begin) + tobinary(length))
+ if DEBUG:
+ print 'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length)
+
+ def send_cancel(self, index, begin, length):
+ self._send_message(CANCEL + tobinary(index) +
+ tobinary(begin) + tobinary(length))
+ if DEBUG:
+ print 'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length)
+
+ def send_bitfield(self, bitfield):
+ self._send_message(BITFIELD + bitfield)
+
+ def send_have(self, index):
+ self._send_message(HAVE + tobinary(index))
+
+ def send_keepalive(self):
+ self._send_message('')
+
+ def _send_message(self, s):
+ s = tobinary(len(s))+s
+ if self.partial_message:
+ self.outqueue.append(s)
+ else:
+ self.connection.send_message_raw(s)
+
+ def send_partial(self, bytes):
+ if self.connection.closed:
+ return 0
+ if self.partial_message is None:
+ s = self.upload.get_upload_chunk()
+ if s is None:
+ return 0
+ index, begin, piece = s
+ self.partial_message = ''.join((
+ tobinary(len(piece) + 9), PIECE,
+ tobinary(index), tobinary(begin), piece.tostring() ))
+ if DEBUG:
+ print 'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece))
+
+ if bytes < len(self.partial_message):
+ self.connection.send_message_raw(self.partial_message[:bytes])
+ self.partial_message = self.partial_message[bytes:]
+ return bytes
+
+ q = [self.partial_message]
+ self.partial_message = None
+ if self.send_choke_queued:
+ self.send_choke_queued = False
+ self.outqueue.append(tobinary(1)+CHOKE)
+ self.upload.choke_sent()
+ self.just_unchoked = 0
+ q.extend(self.outqueue)
+ self.outqueue = []
+ q = ''.join(q)
+ self.connection.send_message_raw(q)
+ return len(q)
+
+ def get_upload(self):
+ return self.upload
+
+ def get_download(self):
+ return self.download
+
+ def set_download(self, download):
+ self.download = download
+
+ def backlogged(self):
+ return not self.connection.is_flushed()
+
+ def got_request(self, i, p, l):
+ self.upload.got_request(i, p, l)
+ if self.just_unchoked:
+ self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
+ self.just_unchoked = 0
+
+
+
+
+class Connecter:
+ def __init__(self, make_upload, downloader, choker, numpieces,
+ totalup, config, ratelimiter, sched = None):
+ self.downloader = downloader
+ self.make_upload = make_upload
+ self.choker = choker
+ self.numpieces = numpieces
+ self.config = config
+ self.ratelimiter = ratelimiter
+ self.rate_capped = False
+ self.sched = sched
+ self.totalup = totalup
+ self.rate_capped = False
+ self.connections = {}
+ self.external_connection_made = 0
+
+ def how_many_connections(self):
+ return len(self.connections)
+
+ def connection_made(self, connection):
+ c = Connection(connection, self)
+ self.connections[connection] = c
+ c.upload = self.make_upload(c, self.ratelimiter, self.totalup)
+ c.download = self.downloader.make_download(c)
+ self.choker.connection_made(c)
+ return c
+
+ def connection_lost(self, connection):
+ c = self.connections[connection]
+ del self.connections[connection]
+ if c.download:
+ c.download.disconnected()
+ self.choker.connection_lost(c)
+
+ def connection_flushed(self, connection):
+ conn = self.connections[connection]
+ if conn.next_upload is None and (conn.partial_message is not None
+ or len(conn.upload.buffer) > 0):
+ self.ratelimiter.queue(conn)
+
+ def got_piece(self, i):
+ for co in self.connections.values():
+ co.send_have(i)
+
+ def got_message(self, connection, message):
+ c = self.connections[connection]
+ t = message[0]
+ if t == BITFIELD and c.got_anything:
+ connection.close()
+ return
+ c.got_anything = True
+ if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
+ len(message) != 1):
+ connection.close()
+ return
+ if t == CHOKE:
+ c.download.got_choke()
+ elif t == UNCHOKE:
+ c.download.got_unchoke()
+ elif t == INTERESTED:
+ if not c.download.have.complete():
+ c.upload.got_interested()
+ elif t == NOT_INTERESTED:
+ c.upload.got_not_interested()
+ elif t == HAVE:
+ if len(message) != 5:
+ connection.close()
+ return
+ i = toint(message[1:])
+ if i >= self.numpieces:
+ connection.close()
+ return
+ if c.download.got_have(i):
+ c.upload.got_not_interested()
+ elif t == BITFIELD:
+ try:
+ b = Bitfield(self.numpieces, message[1:])
+ except ValueError:
+ connection.close()
+ return
+ if c.download.got_have_bitfield(b):
+ c.upload.got_not_interested()
+ elif t == REQUEST:
+ if len(message) != 13:
+ connection.close()
+ return
+ i = toint(message[1:5])
+ if i >= self.numpieces:
+ connection.close()
+ return
+ c.got_request(i, toint(message[5:9]),
+ toint(message[9:]))
+ elif t == CANCEL:
+ if len(message) != 13:
+ connection.close()
+ return
+ i = toint(message[1:5])
+ if i >= self.numpieces:
+ connection.close()
+ return
+ c.upload.got_cancel(i, toint(message[5:9]),
+ toint(message[9:]))
+ elif t == PIECE:
+ if len(message) <= 9:
+ connection.close()
+ return
+ i = toint(message[1:5])
+ if i >= self.numpieces:
+ connection.close()
+ return
+ if c.download.got_piece(i, toint(message[5:9]), message[9:]):
+ self.got_piece(i)
+ else:
+ connection.close()
diff --git a/BitTornado/BT1/Downloader.py b/BitTornado/BT1/Downloader.py
new file mode 100644
index 000000000..3b4c978a0
--- /dev/null
+++ b/BitTornado/BT1/Downloader.py
@@ -0,0 +1,594 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+from BitTornado.bitfield import Bitfield
+from random import shuffle
+from BitTornado.clock import clock
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+EXPIRE_TIME = 60 * 60
+
+class PerIPStats:
+ def __init__(self, ip):
+ self.numgood = 0
+ self.bad = {}
+ self.numconnections = 0
+ self.lastdownload = None
+ self.peerid = None
+
+class BadDataGuard:
+ def __init__(self, download):
+ self.download = download
+ self.ip = download.ip
+ self.downloader = download.downloader
+ self.stats = self.downloader.perip[self.ip]
+ self.lastindex = None
+
+ def failed(self, index, bump = False):
+ self.stats.bad.setdefault(index, 0)
+ self.downloader.gotbaddata[self.ip] = 1
+ self.stats.bad[index] += 1
+ if len(self.stats.bad) > 1:
+ if self.download is not None:
+ self.downloader.try_kick(self.download)
+ elif self.stats.numconnections == 1 and self.stats.lastdownload is not None:
+ self.downloader.try_kick(self.stats.lastdownload)
+ if len(self.stats.bad) >= 3 and len(self.stats.bad) > int(self.stats.numgood/30):
+ self.downloader.try_ban(self.ip)
+ elif bump:
+ self.downloader.picker.bump(index)
+
+ def good(self, index):
+ # lastindex is a hack to only increase numgood by one for each good
+ # piece, however many chunks come from the connection(s) from this IP
+ if index != self.lastindex:
+ self.stats.numgood += 1
+ self.lastindex = index
+
+class SingleDownload:
+ def __init__(self, downloader, connection):
+ self.downloader = downloader
+ self.connection = connection
+ self.choked = True
+ self.interested = False
+ self.active_requests = []
+ self.measure = Measure(downloader.max_rate_period)
+ self.peermeasure = Measure(downloader.max_rate_period)
+ self.have = Bitfield(downloader.numpieces)
+ self.last = -1000
+ self.last2 = -1000
+ self.example_interest = None
+ self.backlog = 2
+ self.ip = connection.get_ip()
+ self.guard = BadDataGuard(self)
+
+ def _backlog(self, just_unchoked):
+ self.backlog = min(
+ 2+int(4*self.measure.get_rate()/self.downloader.chunksize),
+ (2*just_unchoked)+self.downloader.queue_limit() )
+ if self.backlog > 50:
+ self.backlog = max(50, self.backlog * 0.075)
+ return self.backlog
+
+ def disconnected(self):
+ self.downloader.lost_peer(self)
+ if self.have.complete():
+ self.downloader.picker.lost_seed()
+ else:
+ for i in xrange(len(self.have)):
+ if self.have[i]:
+ self.downloader.picker.lost_have(i)
+ if self.have.complete() and self.downloader.storage.is_endgame():
+ self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+ self._letgo()
+ self.guard.download = None
+
+ def _letgo(self):
+ if self.downloader.queued_out.has_key(self):
+ del self.downloader.queued_out[self]
+ if not self.active_requests:
+ return
+ if self.downloader.endgamemode:
+ self.active_requests = []
+ return
+ lost = {}
+ for index, begin, length in self.active_requests:
+ self.downloader.storage.request_lost(index, begin, length)
+ lost[index] = 1
+ lost = lost.keys()
+ self.active_requests = []
+ if self.downloader.paused:
+ return
+ ds = [d for d in self.downloader.downloads if not d.choked]
+ shuffle(ds)
+ for d in ds:
+ d._request_more()
+ for d in self.downloader.downloads:
+ if d.choked and not d.interested:
+ for l in lost:
+ if d.have[l] and self.downloader.storage.do_I_have_requests(l):
+ d.send_interested()
+ break
+
+ def got_choke(self):
+ if not self.choked:
+ self.choked = True
+ self._letgo()
+
+ def got_unchoke(self):
+ if self.choked:
+ self.choked = False
+ if self.interested:
+ self._request_more(new_unchoke = True)
+ self.last2 = clock()
+
+ def is_choked(self):
+ return self.choked
+
+ def is_interested(self):
+ return self.interested
+
+ def send_interested(self):
+ if not self.interested:
+ self.interested = True
+ self.connection.send_interested()
+ if not self.choked:
+ self.last2 = clock()
+
+ def send_not_interested(self):
+ if self.interested:
+ self.interested = False
+ self.connection.send_not_interested()
+
+ def got_piece(self, index, begin, piece):
+ length = len(piece)
+ try:
+ self.active_requests.remove((index, begin, length))
+ except ValueError:
+ self.downloader.discarded += length
+ return False
+ if self.downloader.endgamemode:
+ self.downloader.all_requests.remove((index, begin, length))
+ self.last = clock()
+ self.last2 = clock()
+ self.measure.update_rate(length)
+ self.downloader.measurefunc(length)
+ if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
+ self.downloader.piece_flunked(index)
+ return False
+ if self.downloader.storage.do_I_have(index):
+ self.downloader.picker.complete(index)
+ if self.downloader.endgamemode:
+ for d in self.downloader.downloads:
+ if d is not self:
+ if d.interested:
+ if d.choked:
+ assert not d.active_requests
+ d.fix_download_endgame()
+ else:
+ try:
+ d.active_requests.remove((index, begin, length))
+ except ValueError:
+ continue
+ d.connection.send_cancel(index, begin, length)
+ d.fix_download_endgame()
+ else:
+ assert not d.active_requests
+ self._request_more()
+ self.downloader.check_complete(index)
+ return self.downloader.storage.do_I_have(index)
+
+ def _request_more(self, new_unchoke = False):
+ assert not self.choked
+ if self.downloader.endgamemode:
+ self.fix_download_endgame(new_unchoke)
+ return
+ if self.downloader.paused:
+ return
+ if len(self.active_requests) >= self._backlog(new_unchoke):
+ if not (self.active_requests or self.backlog):
+ self.downloader.queued_out[self] = 1
+ return
+ lost_interests = []
+ while len(self.active_requests) < self.backlog:
+ interest = self.downloader.picker.next(self.have,
+ self.downloader.storage.do_I_have_requests,
+ self.downloader.too_many_partials())
+ if interest is None:
+ break
+ self.example_interest = interest
+ self.send_interested()
+ loop = True
+ while len(self.active_requests) < self.backlog and loop:
+ begin, length = self.downloader.storage.new_request(interest)
+ self.downloader.picker.requested(interest)
+ self.active_requests.append((interest, begin, length))
+ self.connection.send_request(interest, begin, length)
+ self.downloader.chunk_requested(length)
+ if not self.downloader.storage.do_I_have_requests(interest):
+ loop = False
+ lost_interests.append(interest)
+ if not self.active_requests:
+ self.send_not_interested()
+ if lost_interests:
+ for d in self.downloader.downloads:
+ if d.active_requests or not d.interested:
+ continue
+ if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
+ continue
+ for lost in lost_interests:
+ if d.have[lost]:
+ break
+ else:
+ continue
+ interest = self.downloader.picker.next(d.have,
+ self.downloader.storage.do_I_have_requests,
+ self.downloader.too_many_partials())
+ if interest is None:
+ d.send_not_interested()
+ else:
+ d.example_interest = interest
+ if self.downloader.storage.is_endgame():
+ self.downloader.start_endgame()
+
+
+ def fix_download_endgame(self, new_unchoke = False):
+ if self.downloader.paused:
+ return
+ if len(self.active_requests) >= self._backlog(new_unchoke):
+ if not (self.active_requests or self.backlog) and not self.choked:
+ self.downloader.queued_out[self] = 1
+ return
+ want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
+ if not (self.active_requests or want):
+ self.send_not_interested()
+ return
+ if want:
+ self.send_interested()
+ if self.choked:
+ return
+ shuffle(want)
+ del want[self.backlog - len(self.active_requests):]
+ self.active_requests.extend(want)
+ for piece, begin, length in want:
+ self.connection.send_request(piece, begin, length)
+ self.downloader.chunk_requested(length)
+
+ def got_have(self, index):
+ if index == self.downloader.numpieces-1:
+ self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+ self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+ else:
+ self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length)
+ self.peermeasure.update_rate(self.downloader.storage.piece_length)
+ if not self.have[index]:
+ self.have[index] = True
+ self.downloader.picker.got_have(index)
+ if self.have.complete():
+ self.downloader.picker.became_seed()
+ if self.downloader.storage.am_I_complete():
+ self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+ self.connection.close()
+ elif self.downloader.endgamemode:
+ self.fix_download_endgame()
+ elif ( not self.downloader.paused
+ and not self.downloader.picker.is_blocked(index)
+ and self.downloader.storage.do_I_have_requests(index) ):
+ if not self.choked:
+ self._request_more()
+ else:
+ self.send_interested()
+ return self.have.complete()
+
+ def _check_interests(self):
+ if self.interested or self.downloader.paused:
+ return
+ for i in xrange(len(self.have)):
+ if ( self.have[i] and not self.downloader.picker.is_blocked(i)
+ and ( self.downloader.endgamemode
+ or self.downloader.storage.do_I_have_requests(i) ) ):
+ self.send_interested()
+ return
+
+ def got_have_bitfield(self, have):
+ if self.downloader.storage.am_I_complete() and have.complete():
+ if self.downloader.super_seeding:
+ self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too
+ self.connection.close()
+ self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+ return False
+ self.have = have
+ if have.complete():
+ self.downloader.picker.got_seed()
+ else:
+ for i in xrange(len(have)):
+ if have[i]:
+ self.downloader.picker.got_have(i)
+ if self.downloader.endgamemode and not self.downloader.paused:
+ for piece, begin, length in self.downloader.all_requests:
+ if self.have[piece]:
+ self.send_interested()
+ break
+ else:
+ self._check_interests()
+ return have.complete()
+
+ def get_rate(self):
+ return self.measure.get_rate()
+
+ def is_snubbed(self):
+ if ( self.interested and not self.choked
+ and clock() - self.last2 > self.downloader.snub_time ):
+ for index, begin, length in self.active_requests:
+ self.connection.send_cancel(index, begin, length)
+ self.got_choke() # treat it just like a choke
+ return clock() - self.last > self.downloader.snub_time
+
+
+class Downloader:
+ def __init__(self, storage, picker, backlog, max_rate_period,
+ numpieces, chunksize, measurefunc, snub_time,
+ kickbans_ok, kickfunc, banfunc):
+ self.storage = storage
+ self.picker = picker
+ self.backlog = backlog
+ self.max_rate_period = max_rate_period
+ self.measurefunc = measurefunc
+ self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size)
+ self.numpieces = numpieces
+ self.chunksize = chunksize
+ self.snub_time = snub_time
+ self.kickfunc = kickfunc
+ self.banfunc = banfunc
+ self.disconnectedseeds = {}
+ self.downloads = []
+ self.perip = {}
+ self.gotbaddata = {}
+ self.kicked = {}
+ self.banned = {}
+ self.kickbans_ok = kickbans_ok
+ self.kickbans_halted = False
+ self.super_seeding = False
+ self.endgamemode = False
+ self.endgame_queued_pieces = []
+ self.all_requests = []
+ self.discarded = 0L
+# self.download_rate = 25000 # 25K/s test rate
+ self.download_rate = 0
+ self.bytes_requested = 0
+ self.last_time = clock()
+ self.queued_out = {}
+ self.requeueing = False
+ self.paused = False
+
+ def set_download_rate(self, rate):
+ self.download_rate = rate * 1000
+ self.bytes_requested = 0
+
+ def queue_limit(self):
+ if not self.download_rate:
+ return 10e10 # that's a big queue!
+ t = clock()
+ self.bytes_requested -= (t - self.last_time) * self.download_rate
+ self.last_time = t
+ if not self.requeueing and self.queued_out and self.bytes_requested < 0:
+ self.requeueing = True
+ q = self.queued_out.keys()
+ shuffle(q)
+ self.queued_out = {}
+ for d in q:
+ d._request_more()
+ self.requeueing = False
+ if -self.bytes_requested > 5*self.download_rate:
+ self.bytes_requested = -5*self.download_rate
+ return max(int(-self.bytes_requested/self.chunksize),0)
+
+ def chunk_requested(self, size):
+ self.bytes_requested += size
+
+ external_data_received = chunk_requested
+
+ def make_download(self, connection):
+ ip = connection.get_ip()
+ if self.perip.has_key(ip):
+ perip = self.perip[ip]
+ else:
+ perip = self.perip.setdefault(ip, PerIPStats(ip))
+ perip.peerid = connection.get_readable_id()
+ perip.numconnections += 1
+ d = SingleDownload(self, connection)
+ perip.lastdownload = d
+ self.downloads.append(d)
+ return d
+
+ def piece_flunked(self, index):
+ if self.paused:
+ return
+ if self.endgamemode:
+ if self.downloads:
+ while self.storage.do_I_have_requests(index):
+ nb, nl = self.storage.new_request(index)
+ self.all_requests.append((index, nb, nl))
+ for d in self.downloads:
+ d.fix_download_endgame()
+ return
+ self._reset_endgame()
+ return
+ ds = [d for d in self.downloads if not d.choked]
+ shuffle(ds)
+ for d in ds:
+ d._request_more()
+ ds = [d for d in self.downloads if not d.interested and d.have[index]]
+ for d in ds:
+ d.example_interest = index
+ d.send_interested()
+
+ def has_downloaders(self):
+ return len(self.downloads)
+
+ def lost_peer(self, download):
+ ip = download.ip
+ self.perip[ip].numconnections -= 1
+ if self.perip[ip].lastdownload == download:
+ self.perip[ip].lastdownload = None
+ self.downloads.remove(download)
+ if self.endgamemode and not self.downloads: # all peers gone
+ self._reset_endgame()
+
+ def _reset_endgame(self):
+ self.storage.reset_endgame(self.all_requests)
+ self.endgamemode = False
+ self.all_requests = []
+ self.endgame_queued_pieces = []
+
+
+ def add_disconnected_seed(self, id):
+# if not self.disconnectedseeds.has_key(id):
+# self.picker.seed_seen_recently()
+ self.disconnectedseeds[id]=clock()
+
+# def expire_disconnected_seeds(self):
+
+ def num_disconnected_seeds(self):
+ # first expire old ones
+ expired = []
+ for id,t in self.disconnectedseeds.items():
+ if clock() - t > EXPIRE_TIME: #Expire old seeds after so long
+ expired.append(id)
+ for id in expired:
+# self.picker.seed_disappeared()
+ del self.disconnectedseeds[id]
+ return len(self.disconnectedseeds)
+ # if this isn't called by a stats-gathering function
+ # it should be scheduled to run every minute or two.
+
+ def _check_kicks_ok(self):
+ if len(self.gotbaddata) > 10:
+ self.kickbans_ok = False
+ self.kickbans_halted = True
+ return self.kickbans_ok and len(self.downloads) > 2
+
+ def try_kick(self, download):
+ if self._check_kicks_ok():
+ download.guard.download = None
+ ip = download.ip
+ id = download.connection.get_readable_id()
+ self.kicked[ip] = id
+ self.perip[ip].peerid = id
+ self.kickfunc(download.connection)
+
+ def try_ban(self, ip):
+ if self._check_kicks_ok():
+ self.banfunc(ip)
+ self.banned[ip] = self.perip[ip].peerid
+ if self.kicked.has_key(ip):
+ del self.kicked[ip]
+
+ def set_super_seed(self):
+ self.super_seeding = True
+
+ def check_complete(self, index):
+ if self.endgamemode and not self.all_requests:
+ self.endgamemode = False
+ if self.endgame_queued_pieces and not self.endgamemode:
+ self.requeue_piece_download()
+ if self.storage.am_I_complete():
+ assert not self.all_requests
+ assert not self.endgamemode
+ for d in [i for i in self.downloads if i.have.complete()]:
+ d.connection.send_have(index) # be nice, tell the other seed you completed
+ self.add_disconnected_seed(d.connection.get_readable_id())
+ d.connection.close()
+ return True
+ return False
+
+ def too_many_partials(self):
+ return len(self.storage.dirty) > (len(self.downloads)/2)
+
+
+ def cancel_piece_download(self, pieces):
+ if self.endgamemode:
+ if self.endgame_queued_pieces:
+ for piece in pieces:
+ try:
+ self.endgame_queued_pieces.remove(piece)
+ except:
+ pass
+ new_all_requests = []
+ for index, nb, nl in self.all_requests:
+ if index in pieces:
+ self.storage.request_lost(index, nb, nl)
+ else:
+ new_all_requests.append((index, nb, nl))
+ self.all_requests = new_all_requests
+
+ for d in self.downloads:
+ hit = False
+ for index, nb, nl in d.active_requests:
+ if index in pieces:
+ hit = True
+ d.connection.send_cancel(index, nb, nl)
+ if not self.endgamemode:
+ self.storage.request_lost(index, nb, nl)
+ if hit:
+ d.active_requests = [ r for r in d.active_requests
+ if r[0] not in pieces ]
+ d._request_more()
+ if not self.endgamemode and d.choked:
+ d._check_interests()
+
+ def requeue_piece_download(self, pieces = []):
+ if self.endgame_queued_pieces:
+ for piece in pieces:
+ if not piece in self.endgame_queued_pieces:
+ self.endgame_queued_pieces.append(piece)
+ pieces = self.endgame_queued_pieces
+ if self.endgamemode:
+ if self.all_requests:
+ self.endgame_queued_pieces = pieces
+ return
+ self.endgamemode = False
+ self.endgame_queued_pieces = None
+
+ ds = [d for d in self.downloads]
+ shuffle(ds)
+ for d in ds:
+ if d.choked:
+ d._check_interests()
+ else:
+ d._request_more()
+
+ def start_endgame(self):
+ assert not self.endgamemode
+ self.endgamemode = True
+ assert not self.all_requests
+ for d in self.downloads:
+ if d.active_requests:
+ assert d.interested and not d.choked
+ for request in d.active_requests:
+ assert not request in self.all_requests
+ self.all_requests.append(request)
+ for d in self.downloads:
+ d.fix_download_endgame()
+
+ def pause(self, flag):
+ self.paused = flag
+ if flag:
+ for d in self.downloads:
+ for index, begin, length in d.active_requests:
+ d.connection.send_cancel(index, begin, length)
+ d._letgo()
+ d.send_not_interested()
+ if self.endgamemode:
+ self._reset_endgame()
+ else:
+ shuffle(self.downloads)
+ for d in self.downloads:
+ d._check_interests()
+ if d.interested and not d.choked:
+ d._request_more()
diff --git a/BitTornado/BT1/DownloaderFeedback.py b/BitTornado/BT1/DownloaderFeedback.py
new file mode 100644
index 000000000..836617457
--- /dev/null
+++ b/BitTornado/BT1/DownloaderFeedback.py
@@ -0,0 +1,155 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from urllib import quote
+from threading import Event
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+class DownloaderFeedback:
+ def __init__(self, choker, httpdl, add_task, upfunc, downfunc,
+ ratemeasure, leftfunc, file_length, finflag, sp, statistics,
+ statusfunc = None, interval = None):
+ self.choker = choker
+ self.httpdl = httpdl
+ self.add_task = add_task
+ self.upfunc = upfunc
+ self.downfunc = downfunc
+ self.ratemeasure = ratemeasure
+ self.leftfunc = leftfunc
+ self.file_length = file_length
+ self.finflag = finflag
+ self.sp = sp
+ self.statistics = statistics
+ self.lastids = []
+ self.spewdata = None
+ self.doneprocessing = Event()
+ self.doneprocessing.set()
+ if statusfunc:
+ self.autodisplay(statusfunc, interval)
+
+
+ def _rotate(self):
+ cs = self.choker.connections
+ for id in self.lastids:
+ for i in xrange(len(cs)):
+ if cs[i].get_id() == id:
+ return cs[i:] + cs[:i]
+ return cs
+
+ def spews(self):
+ l = []
+ cs = self._rotate()
+ self.lastids = [c.get_id() for c in cs]
+ for c in cs:
+ a = {}
+ a['id'] = c.get_readable_id()
+ a['ip'] = c.get_ip()
+ a['optimistic'] = (c is self.choker.connections[0])
+ if c.is_locally_initiated():
+ a['direction'] = 'L'
+ else:
+ a['direction'] = 'R'
+ u = c.get_upload()
+ a['uprate'] = int(u.measure.get_rate())
+ a['uinterested'] = u.is_interested()
+ a['uchoked'] = u.is_choked()
+ d = c.get_download()
+ a['downrate'] = int(d.measure.get_rate())
+ a['dinterested'] = d.is_interested()
+ a['dchoked'] = d.is_choked()
+ a['snubbed'] = d.is_snubbed()
+ a['utotal'] = d.connection.upload.measure.get_total()
+ a['dtotal'] = d.connection.download.measure.get_total()
+ if len(d.connection.download.have) > 0:
+ a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have))
+ else:
+ a['completed'] = 1.0
+ a['speed'] = d.connection.download.peermeasure.get_rate()
+
+ l.append(a)
+
+ for dl in self.httpdl.get_downloads():
+ if dl.goodseed:
+ a = {}
+ a['id'] = 'http seed'
+ a['ip'] = dl.baseurl
+ a['optimistic'] = False
+ a['direction'] = 'L'
+ a['uprate'] = 0
+ a['uinterested'] = False
+ a['uchoked'] = False
+ a['downrate'] = int(dl.measure.get_rate())
+ a['dinterested'] = True
+ a['dchoked'] = not dl.active
+ a['snubbed'] = not dl.active
+ a['utotal'] = None
+ a['dtotal'] = dl.measure.get_total()
+ a['completed'] = 1.0
+ a['speed'] = None
+
+ l.append(a)
+
+ return l
+
+
+ def gather(self, displayfunc = None):
+ s = {'stats': self.statistics.update()}
+ if self.sp.isSet():
+ s['spew'] = self.spews()
+ else:
+ s['spew'] = None
+ s['up'] = self.upfunc()
+ if self.finflag.isSet():
+ s['done'] = self.file_length
+ return s
+ s['down'] = self.downfunc()
+ obtained, desired = self.leftfunc()
+ s['done'] = obtained
+ s['wanted'] = desired
+ if desired > 0:
+ s['frac'] = float(obtained)/desired
+ else:
+ s['frac'] = 1.0
+ if desired == obtained:
+ s['time'] = 0
+ else:
+ s['time'] = self.ratemeasure.get_time_left(desired-obtained)
+ return s
+
+
+ def display(self, displayfunc):
+ if not self.doneprocessing.isSet():
+ return
+ self.doneprocessing.clear()
+ stats = self.gather()
+ if self.finflag.isSet():
+ displayfunc(dpflag = self.doneprocessing,
+ upRate = stats['up'],
+ statistics = stats['stats'], spew = stats['spew'])
+ elif stats['time'] is not None:
+ displayfunc(dpflag = self.doneprocessing,
+ fractionDone = stats['frac'], sizeDone = stats['done'],
+ downRate = stats['down'], upRate = stats['up'],
+ statistics = stats['stats'], spew = stats['spew'],
+ timeEst = stats['time'])
+ else:
+ displayfunc(dpflag = self.doneprocessing,
+ fractionDone = stats['frac'], sizeDone = stats['done'],
+ downRate = stats['down'], upRate = stats['up'],
+ statistics = stats['stats'], spew = stats['spew'])
+
+
+ def autodisplay(self, displayfunc, interval):
+ self.displayfunc = displayfunc
+ self.interval = interval
+ self._autodisplay()
+
+ def _autodisplay(self):
+ self.add_task(self._autodisplay, self.interval)
+ self.display(self.displayfunc)
diff --git a/BitTornado/BT1/Encrypter.py b/BitTornado/BT1/Encrypter.py
new file mode 100644
index 000000000..8dcce1f74
--- /dev/null
+++ b/BitTornado/BT1/Encrypter.py
@@ -0,0 +1,333 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from binascii import b2a_hex
+from socket import error as socketerror
+from urllib import quote
+from traceback import print_exc
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+MAX_INCOMPLETE = 8
+
+protocol_name = 'BitTorrent protocol'
+option_pattern = chr(0)*8
+
+def toint(s):
+ return long(b2a_hex(s), 16)
+
+def tobinary(i):
+ return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
+ chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+hexchars = '0123456789ABCDEF'
+hexmap = []
+for i in xrange(256):
+ hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
+
+def tohex(s):
+ r = []
+ for c in s:
+ r.append(hexmap[ord(c)])
+ return ''.join(r)
+
+def make_readable(s):
+ if not s:
+ return ''
+ if quote(s).find('%') >= 0:
+ return tohex(s)
+ return '"'+s+'"'
+
+
+class IncompleteCounter:
+ def __init__(self):
+ self.c = 0
+ def increment(self):
+ self.c += 1
+ def decrement(self):
+ self.c -= 1
+ def toomany(self):
+ return self.c >= MAX_INCOMPLETE
+
+incompletecounter = IncompleteCounter()
+
+
+# header, reserved, download id, my id, [length, message]
+
+class Connection:
+ def __init__(self, Encoder, connection, id, ext_handshake=False):
+ self.Encoder = Encoder
+ self.connection = connection
+ self.connecter = Encoder.connecter
+ self.id = id
+ self.readable_id = make_readable(id)
+ self.locally_initiated = (id != None)
+ self.complete = False
+ self.keepalive = lambda: None
+ self.closed = False
+ self.buffer = StringIO()
+ if self.locally_initiated:
+ incompletecounter.increment()
+ if self.locally_initiated or ext_handshake:
+ self.connection.write(chr(len(protocol_name)) + protocol_name +
+ option_pattern + self.Encoder.download_id)
+ if ext_handshake:
+ self.Encoder.connecter.external_connection_made += 1
+ self.connection.write(self.Encoder.my_id)
+ self.next_len, self.next_func = 20, self.read_peer_id
+ else:
+ self.next_len, self.next_func = 1, self.read_header_len
+ self.Encoder.raw_server.add_task(self._auto_close, 15)
+
+ def get_ip(self, real=False):
+ return self.connection.get_ip(real)
+
+ def get_id(self):
+ return self.id
+
+ def get_readable_id(self):
+ return self.readable_id
+
+ def is_locally_initiated(self):
+ return self.locally_initiated
+
+ def is_flushed(self):
+ return self.connection.is_flushed()
+
+ def read_header_len(self, s):
+ if ord(s) != len(protocol_name):
+ return None
+ return len(protocol_name), self.read_header
+
+ def read_header(self, s):
+ if s != protocol_name:
+ return None
+ return 8, self.read_reserved
+
+ def read_reserved(self, s):
+ return 20, self.read_download_id
+
+ def read_download_id(self, s):
+ if s != self.Encoder.download_id:
+ return None
+ if not self.locally_initiated:
+ self.Encoder.connecter.external_connection_made += 1
+ self.connection.write(chr(len(protocol_name)) + protocol_name +
+ option_pattern + self.Encoder.download_id + self.Encoder.my_id)
+ return 20, self.read_peer_id
+
+ def read_peer_id(self, s):
+ if not self.id:
+ self.id = s
+ self.readable_id = make_readable(s)
+ else:
+ if s != self.id:
+ return None
+ self.complete = self.Encoder.got_id(self)
+ if not self.complete:
+ return None
+ if self.locally_initiated:
+ self.connection.write(self.Encoder.my_id)
+ incompletecounter.decrement()
+ c = self.Encoder.connecter.connection_made(self)
+ self.keepalive = c.send_keepalive
+ return 4, self.read_len
+
+ def read_len(self, s):
+ l = toint(s)
+ if l > self.Encoder.max_len:
+ return None
+ return l, self.read_message
+
+ def read_message(self, s):
+ if s != '':
+ self.connecter.got_message(self, s)
+ return 4, self.read_len
+
+ def read_dead(self, s):
+ return None
+
+ def _auto_close(self):
+ if not self.complete:
+ self.close()
+
+ def close(self):
+ if not self.closed:
+ self.connection.close()
+ self.sever()
+
+ def sever(self):
+ self.closed = True
+ del self.Encoder.connections[self.connection]
+ if self.complete:
+ self.connecter.connection_lost(self)
+ elif self.locally_initiated:
+ incompletecounter.decrement()
+
+ def send_message_raw(self, message):
+ if not self.closed:
+ self.connection.write(message)
+
+ def data_came_in(self, connection, s):
+ self.Encoder.measurefunc(len(s))
+ while True:
+ if self.closed:
+ return
+ i = self.next_len - self.buffer.tell()
+ if i > len(s):
+ self.buffer.write(s)
+ return
+ self.buffer.write(s[:i])
+ s = s[i:]
+ m = self.buffer.getvalue()
+ self.buffer.reset()
+ self.buffer.truncate()
+ try:
+ x = self.next_func(m)
+ except:
+ self.next_len, self.next_func = 1, self.read_dead
+ raise
+ if x is None:
+ self.close()
+ return
+ self.next_len, self.next_func = x
+
+ def connection_flushed(self, connection):
+ if self.complete:
+ self.connecter.connection_flushed(self)
+
+ def connection_lost(self, connection):
+ if self.Encoder.connections.has_key(connection):
+ self.sever()
+
+
+class Encoder:
+ def __init__(self, connecter, raw_server, my_id, max_len,
+ schedulefunc, keepalive_delay, download_id,
+ measurefunc, config):
+ self.raw_server = raw_server
+ self.connecter = connecter
+ self.my_id = my_id
+ self.max_len = max_len
+ self.schedulefunc = schedulefunc
+ self.keepalive_delay = keepalive_delay
+ self.download_id = download_id
+ self.measurefunc = measurefunc
+ self.config = config
+ self.connections = {}
+ self.banned = {}
+ self.to_connect = []
+ self.paused = False
+ if self.config['max_connections'] == 0:
+ self.max_connections = 2 ** 30
+ else:
+ self.max_connections = self.config['max_connections']
+ schedulefunc(self.send_keepalives, keepalive_delay)
+
+ def send_keepalives(self):
+ self.schedulefunc(self.send_keepalives, self.keepalive_delay)
+ if self.paused:
+ return
+ for c in self.connections.values():
+ c.keepalive()
+
+ def start_connections(self, list):
+ if not self.to_connect:
+ self.raw_server.add_task(self._start_connection_from_queue)
+ self.to_connect = list
+
+ def _start_connection_from_queue(self):
+ if self.connecter.external_connection_made:
+ max_initiate = self.config['max_initiate']
+ else:
+ max_initiate = int(self.config['max_initiate']*1.5)
+ cons = len(self.connections)
+ if cons >= self.max_connections or cons >= max_initiate:
+ delay = 60
+ elif self.paused or incompletecounter.toomany():
+ delay = 1
+ else:
+ delay = 0
+ dns, id = self.to_connect.pop(0)
+ self.start_connection(dns, id)
+ if self.to_connect:
+ self.raw_server.add_task(self._start_connection_from_queue, delay)
+
+ def start_connection(self, dns, id):
+ if ( self.paused
+ or len(self.connections) >= self.max_connections
+ or id == self.my_id
+ or self.banned.has_key(dns[0]) ):
+ return True
+ for v in self.connections.values():
+ if v is None:
+ continue
+ if id and v.id == id:
+ return True
+ ip = v.get_ip(True)
+ if self.config['security'] and ip != 'unknown' and ip == dns[0]:
+ return True
+ try:
+ c = self.raw_server.start_connection(dns)
+ con = Connection(self, c, id)
+ self.connections[c] = con
+ c.set_handler(con)
+ except socketerror:
+ return False
+ return True
+
+ def _start_connection(self, dns, id):
+ def foo(self=self, dns=dns, id=id):
+ self.start_connection(dns, id)
+
+ self.schedulefunc(foo, 0)
+
+ def got_id(self, connection):
+ if connection.id == self.my_id:
+ self.connecter.external_connection_made -= 1
+ return False
+ ip = connection.get_ip(True)
+ if self.config['security'] and self.banned.has_key(ip):
+ return False
+ for v in self.connections.values():
+ if connection is not v:
+ if connection.id == v.id:
+ return False
+ if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True):
+ v.close()
+ return True
+
+ def external_connection_made(self, connection):
+ if self.paused or len(self.connections) >= self.max_connections:
+ connection.close()
+ return False
+ con = Connection(self, connection, None)
+ self.connections[connection] = con
+ connection.set_handler(con)
+ return True
+
+ def externally_handshaked_connection_made(self, connection, options, already_read):
+ if self.paused or len(self.connections) >= self.max_connections:
+ connection.close()
+ return False
+ con = Connection(self, connection, None, True)
+ self.connections[connection] = con
+ connection.set_handler(con)
+ if already_read:
+ con.data_came_in(con, already_read)
+ return True
+
+ def close_all(self):
+ for c in self.connections.values():
+ c.close()
+ self.connections = {}
+
+ def ban(self, ip):
+ self.banned[ip] = 1
+
+ def pause(self, flag):
+ self.paused = flag
diff --git a/BitTornado/BT1/FileSelector.py b/BitTornado/BT1/FileSelector.py
new file mode 100644
index 000000000..79f096e32
--- /dev/null
+++ b/BitTornado/BT1/FileSelector.py
@@ -0,0 +1,245 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from random import shuffle
+from traceback import print_exc
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+
+class FileSelector:
+ def __init__(self, files, piece_length, bufferdir,
+ storage, storagewrapper, sched, failfunc):
+ self.files = files
+ self.storage = storage
+ self.storagewrapper = storagewrapper
+ self.sched = sched
+ self.failfunc = failfunc
+ self.downloader = None
+ self.picker = None
+
+ storage.set_bufferdir(bufferdir)
+
+ self.numfiles = len(files)
+ self.priority = [1] * self.numfiles
+ self.new_priority = None
+ self.new_partials = None
+ self.filepieces = []
+ total = 0L
+ for file, length in files:
+ if not length:
+ self.filepieces.append(())
+ else:
+ pieces = range( int(total/piece_length),
+ int((total+length-1)/piece_length)+1 )
+ self.filepieces.append(tuple(pieces))
+ total += length
+ self.numpieces = int((total+piece_length-1)/piece_length)
+ self.piece_priority = [1] * self.numpieces
+
+
+
+ def init_priority(self, new_priority):
+ try:
+ assert len(new_priority) == self.numfiles
+ for v in new_priority:
+ assert type(v) in (type(0),type(0L))
+ assert v >= -1
+ assert v <= 2
+ except:
+# print_exc()
+ return False
+ try:
+ files_updated = False
+ for f in xrange(self.numfiles):
+ if new_priority[f] < 0:
+ self.storage.disable_file(f)
+ files_updated = True
+ if files_updated:
+ self.storage.reset_file_status()
+ self.new_priority = new_priority
+ except (IOError, OSError), e:
+ self.failfunc("can't open partial file for "
+ + self.files[f][0] + ': ' + str(e))
+ return False
+ return True
+
+ '''
+ d['priority'] = [file #1 priority [,file #2 priority...] ]
+ a list of download priorities for each file.
+ Priority may be -1, 0, 1, 2. -1 = download disabled,
+ 0 = highest, 1 = normal, 2 = lowest.
+ Also see Storage.pickle and StorageWrapper.pickle for additional keys.
+ '''
+ def unpickle(self, d):
+ if d.has_key('priority'):
+ if not self.init_priority(d['priority']):
+ return
+ pieces = self.storage.unpickle(d)
+ if not pieces: # don't bother, nothing restoreable
+ return
+ new_piece_priority = self._get_piece_priority_list(self.new_priority)
+ self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+ self.new_partials = self.storagewrapper.unpickle(d, pieces)
+
+
+ def tie_in(self, picker, cancelfunc, requestmorefunc, rerequestfunc):
+ self.picker = picker
+ self.cancelfunc = cancelfunc
+ self.requestmorefunc = requestmorefunc
+ self.rerequestfunc = rerequestfunc
+
+ if self.new_priority:
+ self.priority = self.new_priority
+ self.new_priority = None
+ self.new_piece_priority = self._set_piece_priority(self.priority)
+
+ if self.new_partials:
+ shuffle(self.new_partials)
+ for p in self.new_partials:
+ self.picker.requested(p)
+ self.new_partials = None
+
+
+ def _set_files_disabled(self, old_priority, new_priority):
+ old_disabled = [p == -1 for p in old_priority]
+ new_disabled = [p == -1 for p in new_priority]
+ data_to_update = []
+ for f in xrange(self.numfiles):
+ if new_disabled[f] != old_disabled[f]:
+ data_to_update.extend(self.storage.get_piece_update_list(f))
+ buffer = []
+ for piece, start, length in data_to_update:
+ if self.storagewrapper.has_data(piece):
+ data = self.storagewrapper.read_raw(piece, start, length)
+ if data is None:
+ return False
+ buffer.append((piece, start, data))
+
+ files_updated = False
+ try:
+ for f in xrange(self.numfiles):
+ if new_disabled[f] and not old_disabled[f]:
+ self.storage.disable_file(f)
+ files_updated = True
+ if old_disabled[f] and not new_disabled[f]:
+ self.storage.enable_file(f)
+ files_updated = True
+ except (IOError, OSError), e:
+ if new_disabled[f]:
+ msg = "can't open partial file for "
+ else:
+ msg = 'unable to open '
+ self.failfunc(msg + self.files[f][0] + ': ' + str(e))
+ return False
+ if files_updated:
+ self.storage.reset_file_status()
+
+ changed_pieces = {}
+ for piece, start, data in buffer:
+ if not self.storagewrapper.write_raw(piece, start, data):
+ return False
+ data.release()
+ changed_pieces[piece] = 1
+ if not self.storagewrapper.doublecheck_data(changed_pieces):
+ return False
+
+ return True
+
+
+ def _get_piece_priority_list(self, file_priority_list):
+ l = [-1] * self.numpieces
+ for f in xrange(self.numfiles):
+ if file_priority_list[f] == -1:
+ continue
+ for i in self.filepieces[f]:
+ if l[i] == -1:
+ l[i] = file_priority_list[f]
+ continue
+ l[i] = min(l[i],file_priority_list[f])
+ return l
+
+
+ def _set_piece_priority(self, new_priority):
+ was_complete = self.storagewrapper.am_I_complete()
+ new_piece_priority = self._get_piece_priority_list(new_priority)
+ pieces = range(self.numpieces)
+ shuffle(pieces)
+ new_blocked = []
+ new_unblocked = []
+ for piece in pieces:
+ self.picker.set_priority(piece,new_piece_priority[piece])
+ o = self.piece_priority[piece] == -1
+ n = new_piece_priority[piece] == -1
+ if n and not o:
+ new_blocked.append(piece)
+ if o and not n:
+ new_unblocked.append(piece)
+ if new_blocked:
+ self.cancelfunc(new_blocked)
+ self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+ if new_unblocked:
+ self.requestmorefunc(new_unblocked)
+ if was_complete and not self.storagewrapper.am_I_complete():
+ self.rerequestfunc()
+
+ return new_piece_priority
+
+
+ def set_priorities_now(self, new_priority = None):
+ if not new_priority:
+ new_priority = self.new_priority
+ self.new_priority = None # potential race condition
+ if not new_priority:
+ return
+ old_priority = self.priority
+ self.priority = new_priority
+ if not self._set_files_disabled(old_priority, new_priority):
+ return
+ self.piece_priority = self._set_piece_priority(new_priority)
+
+ def set_priorities(self, new_priority):
+ self.new_priority = new_priority
+ self.sched(self.set_priorities_now)
+
+ def set_priority(self, f, p):
+ new_priority = self.get_priorities()
+ new_priority[f] = p
+ self.set_priorities(new_priority)
+
+ def get_priorities(self):
+ priority = self.new_priority
+ if not priority:
+ priority = self.priority # potential race condition
+ return [i for i in priority]
+
+ def __setitem__(self, index, val):
+ self.set_priority(index, val)
+
+ def __getitem__(self, index):
+ try:
+ return self.new_priority[index]
+ except:
+ return self.priority[index]
+
+
+ def finish(self):
+ for f in xrange(self.numfiles):
+ if self.priority[f] == -1:
+ self.storage.delete_file(f)
+
+ def pickle(self):
+ d = {'priority': self.priority}
+ try:
+ s = self.storage.pickle()
+ sw = self.storagewrapper.pickle()
+ for k in s.keys():
+ d[k] = s[k]
+ for k in sw.keys():
+ d[k] = sw[k]
+ except (IOError, OSError):
+ pass
+ return d
diff --git a/BitTornado/BT1/Filter.py b/BitTornado/BT1/Filter.py
new file mode 100644
index 000000000..3506399f3
--- /dev/null
+++ b/BitTornado/BT1/Filter.py
@@ -0,0 +1,12 @@
+class Filter:
+ def __init__(self, callback):
+ self.callback = callback
+
+ def check(self, ip, paramslist, headers):
+
+ def params(key, default = None, l = paramslist):
+ if l.has_key(key):
+ return l[key][0]
+ return default
+
+ return None
diff --git a/BitTornado/BT1/HTTPDownloader.py b/BitTornado/BT1/HTTPDownloader.py
new file mode 100644
index 000000000..922d3ffc0
--- /dev/null
+++ b/BitTornado/BT1/HTTPDownloader.py
@@ -0,0 +1,251 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+from random import randint
+from urlparse import urlparse
+from httplib import HTTPConnection
+from urllib import quote
+from threading import Thread
+from BitTornado.__init__ import product_name,version_short
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+EXPIRE_TIME = 60 * 60
+
+VERSION = product_name+'/'+version_short
+
+class haveComplete:
+ def complete(self):
+ return True
+ def __getitem__(self, x):
+ return True
+haveall = haveComplete()
+
+class SingleDownload:
+ def __init__(self, downloader, url):
+ self.downloader = downloader
+ self.baseurl = url
+ try:
+ (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
+ except:
+ self.downloader.errorfunc('cannot parse http seed address: '+url)
+ return
+ if scheme != 'http':
+ self.downloader.errorfunc('http seed url not http: '+url)
+ return
+ try:
+ self.connection = HTTPConnection(self.netloc)
+ except:
+ self.downloader.errorfunc('cannot connect to http seed: '+url)
+ return
+ self.seedurl = path
+ if pars:
+ self.seedurl += ';'+pars
+ self.seedurl += '?'
+ if query:
+ self.seedurl += query+'&'
+ self.seedurl += 'info_hash='+quote(self.downloader.infohash)
+
+ self.measure = Measure(downloader.max_rate_period)
+ self.index = None
+ self.url = ''
+ self.requests = []
+ self.request_size = 0
+ self.endflag = False
+ self.error = None
+ self.retry_period = 30
+ self._retry_period = None
+ self.errorcount = 0
+ self.goodseed = False
+ self.active = False
+ self.cancelled = False
+ self.resched(randint(2,10))
+
+ def resched(self, len = None):
+ if len is None:
+ len = self.retry_period
+ if self.errorcount > 3:
+ len = len * (self.errorcount - 2)
+ self.downloader.rawserver.add_task(self.download, len)
+
+ def _want(self, index):
+ if self.endflag:
+ return self.downloader.storage.do_I_have_requests(index)
+ else:
+ return self.downloader.storage.is_unstarted(index)
+
+ def download(self):
+ self.cancelled = False
+ if self.downloader.picker.am_I_complete():
+ self.downloader.downloads.remove(self)
+ return
+ self.index = self.downloader.picker.next(haveall, self._want)
+ if ( self.index is None and not self.endflag
+ and not self.downloader.peerdownloader.has_downloaders() ):
+ self.endflag = True
+ self.index = self.downloader.picker.next(haveall, self._want)
+ if self.index is None:
+ self.endflag = True
+ self.resched()
+ else:
+ self.url = ( self.seedurl+'&piece='+str(self.index) )
+ self._get_requests()
+ if self.request_size < self.downloader.storage._piecelen(self.index):
+ self.url += '&ranges='+self._request_ranges()
+ rq = Thread(target = self._request)
+ rq.setDaemon(False)
+ rq.start()
+ self.active = True
+
+ def _request(self):
+ import encodings.ascii
+ import encodings.punycode
+ import encodings.idna
+
+ self.error = None
+ self.received_data = None
+ try:
+ self.connection.request('GET',self.url, None,
+ {'User-Agent': VERSION})
+ r = self.connection.getresponse()
+ self.connection_status = r.status
+ self.received_data = r.read()
+ except Exception, e:
+ self.error = 'error accessing http seed: '+str(e)
+ try:
+ self.connection.close()
+ except:
+ pass
+ try:
+ self.connection = HTTPConnection(self.netloc)
+ except:
+ self.connection = None # will cause an exception and retry next cycle
+ self.downloader.rawserver.add_task(self.request_finished)
+
+ def request_finished(self):
+ self.active = False
+ if self.error is not None:
+ if self.goodseed:
+ self.downloader.errorfunc(self.error)
+ self.errorcount += 1
+ if self.received_data:
+ self.errorcount = 0
+ if not self._got_data():
+ self.received_data = None
+ if not self.received_data:
+ self._release_requests()
+ self.downloader.peerdownloader.piece_flunked(self.index)
+ if self._retry_period:
+ self.resched(self._retry_period)
+ self._retry_period = None
+ return
+ self.resched()
+
+ def _got_data(self):
+ if self.connection_status == 503: # seed is busy
+ try:
+ self.retry_period = max(int(self.received_data),5)
+ except:
+ pass
+ return False
+ if self.connection_status != 200:
+ self.errorcount += 1
+ return False
+ self._retry_period = 1
+ if len(self.received_data) != self.request_size:
+ if self.goodseed:
+ self.downloader.errorfunc('corrupt data from http seed - redownloading')
+ return False
+ self.measure.update_rate(len(self.received_data))
+ self.downloader.measurefunc(len(self.received_data))
+ if self.cancelled:
+ return False
+ if not self._fulfill_requests():
+ return False
+ if not self.goodseed:
+ self.goodseed = True
+ self.downloader.seedsfound += 1
+ if self.downloader.storage.do_I_have(self.index):
+ self.downloader.picker.complete(self.index)
+ self.downloader.peerdownloader.check_complete(self.index)
+ self.downloader.gotpiecefunc(self.index)
+ return True
+
+ def _get_requests(self):
+ self.requests = []
+ self.request_size = 0L
+ while self.downloader.storage.do_I_have_requests(self.index):
+ r = self.downloader.storage.new_request(self.index)
+ self.requests.append(r)
+ self.request_size += r[1]
+ self.requests.sort()
+
+ def _fulfill_requests(self):
+ start = 0L
+ success = True
+ while self.requests:
+ begin, length = self.requests.pop(0)
+ if not self.downloader.storage.piece_came_in(self.index, begin,
+ self.received_data[start:start+length]):
+ success = False
+ break
+ start += length
+ return success
+
+ def _release_requests(self):
+ for begin, length in self.requests:
+ self.downloader.storage.request_lost(self.index, begin, length)
+ self.requests = []
+
+ def _request_ranges(self):
+ s = ''
+ begin, length = self.requests[0]
+ for begin1, length1 in self.requests[1:]:
+ if begin + length == begin1:
+ length += length1
+ continue
+ else:
+ if s:
+ s += ','
+ s += str(begin)+'-'+str(begin+length-1)
+ begin, length = begin1, length1
+ if s:
+ s += ','
+ s += str(begin)+'-'+str(begin+length-1)
+ return s
+
+
+class HTTPDownloader:
+ def __init__(self, storage, picker, rawserver,
+ finflag, errorfunc, peerdownloader,
+ max_rate_period, infohash, measurefunc, gotpiecefunc):
+ self.storage = storage
+ self.picker = picker
+ self.rawserver = rawserver
+ self.finflag = finflag
+ self.errorfunc = errorfunc
+ self.peerdownloader = peerdownloader
+ self.infohash = infohash
+ self.max_rate_period = max_rate_period
+ self.gotpiecefunc = gotpiecefunc
+ self.measurefunc = measurefunc
+ self.downloads = []
+ self.seedsfound = 0
+
+ def make_download(self, url):
+ self.downloads.append(SingleDownload(self, url))
+ return self.downloads[-1]
+
+ def get_downloads(self):
+ if self.finflag.isSet():
+ return []
+ return self.downloads
+
+ def cancel_piece_download(self, pieces):
+ for d in self.downloads:
+ if d.active and d.index in pieces:
+ d.cancelled = True
diff --git a/BitTornado/BT1/NatCheck.py b/BitTornado/BT1/NatCheck.py
new file mode 100644
index 000000000..258d1cbbd
--- /dev/null
+++ b/BitTornado/BT1/NatCheck.py
@@ -0,0 +1,95 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from socket import error as socketerror
+from traceback import print_exc
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+protocol_name = 'BitTorrent protocol'
+
+# header, reserved, download id, my id, [length, message]
+
+class NatCheck:
+ def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
+ self.resultfunc = resultfunc
+ self.downloadid = downloadid
+ self.peerid = peerid
+ self.ip = ip
+ self.port = port
+ self.closed = False
+ self.buffer = StringIO()
+ self.next_len = 1
+ self.next_func = self.read_header_len
+ try:
+ self.connection = rawserver.start_connection((ip, port), self)
+ self.connection.write(chr(len(protocol_name)) + protocol_name +
+ (chr(0) * 8) + downloadid)
+ except socketerror:
+ self.answer(False)
+ except IOError:
+ self.answer(False)
+
+ def answer(self, result):
+ self.closed = True
+ try:
+ self.connection.close()
+ except AttributeError:
+ pass
+ self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)
+
+ def read_header_len(self, s):
+ if ord(s) != len(protocol_name):
+ return None
+ return len(protocol_name), self.read_header
+
+ def read_header(self, s):
+ if s != protocol_name:
+ return None
+ return 8, self.read_reserved
+
+ def read_reserved(self, s):
+ return 20, self.read_download_id
+
+ def read_download_id(self, s):
+ if s != self.downloadid:
+ return None
+ return 20, self.read_peer_id
+
+ def read_peer_id(self, s):
+ if s != self.peerid:
+ return None
+ self.answer(True)
+ return None
+
+ def data_came_in(self, connection, s):
+ while True:
+ if self.closed:
+ return
+ i = self.next_len - self.buffer.tell()
+ if i > len(s):
+ self.buffer.write(s)
+ return
+ self.buffer.write(s[:i])
+ s = s[i:]
+ m = self.buffer.getvalue()
+ self.buffer.reset()
+ self.buffer.truncate()
+ x = self.next_func(m)
+ if x is None:
+ if not self.closed:
+ self.answer(False)
+ return
+ self.next_len, self.next_func = x
+
+ def connection_lost(self, connection):
+ if not self.closed:
+ self.closed = True
+ self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)
+
+ def connection_flushed(self, connection):
+ pass
diff --git a/BitTornado/BT1/PiecePicker.py b/BitTornado/BT1/PiecePicker.py
new file mode 100644
index 000000000..83861f3bc
--- /dev/null
+++ b/BitTornado/BT1/PiecePicker.py
@@ -0,0 +1,320 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BitTornado.clock import clock
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+class PiecePicker:
+ def __init__(self, numpieces,
+ rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
+ priority_step = 20):
+ self.rarest_first_cutoff = rarest_first_cutoff
+ self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step
+ self.priority_step = priority_step
+ self.cutoff = rarest_first_priority_cutoff
+ self.numpieces = numpieces
+ self.started = []
+ self.totalcount = 0
+ self.numhaves = [0] * numpieces
+ self.priority = [1] * numpieces
+ self.removed_partials = {}
+ self.crosscount = [numpieces]
+ self.crosscount2 = [numpieces]
+ self.has = [0] * numpieces
+ self.numgot = 0
+ self.done = False
+ self.seed_connections = {}
+ self.past_ips = {}
+ self.seed_time = None
+ self.superseed = False
+ self.seeds_connected = 0
+ self._init_interests()
+
+ def _init_interests(self):
+ self.interests = [[] for x in xrange(self.priority_step)]
+ self.level_in_interests = [self.priority_step] * self.numpieces
+ interests = range(self.numpieces)
+ shuffle(interests)
+ self.pos_in_interests = [0] * self.numpieces
+ for i in xrange(self.numpieces):
+ self.pos_in_interests[interests[i]] = i
+ self.interests.append(interests)
+
+
+ def got_have(self, piece):
+ self.totalcount+=1
+ numint = self.numhaves[piece]
+ self.numhaves[piece] += 1
+ self.crosscount[numint] -= 1
+ if numint+1==len(self.crosscount):
+ self.crosscount.append(0)
+ self.crosscount[numint+1] += 1
+ if not self.done:
+ numintplus = numint+self.has[piece]
+ self.crosscount2[numintplus] -= 1
+ if numintplus+1 == len(self.crosscount2):
+ self.crosscount2.append(0)
+ self.crosscount2[numintplus+1] += 1
+ numint = self.level_in_interests[piece]
+ self.level_in_interests[piece] += 1
+ if self.superseed:
+ self.seed_got_haves[piece] += 1
+ numint = self.level_in_interests[piece]
+ self.level_in_interests[piece] += 1
+ elif self.has[piece] or self.priority[piece] == -1:
+ return
+ if numint == len(self.interests) - 1:
+ self.interests.append([])
+ self._shift_over(piece, self.interests[numint], self.interests[numint + 1])
+
+ def lost_have(self, piece):
+ self.totalcount-=1
+ numint = self.numhaves[piece]
+ self.numhaves[piece] -= 1
+ self.crosscount[numint] -= 1
+ self.crosscount[numint-1] += 1
+ if not self.done:
+ numintplus = numint+self.has[piece]
+ self.crosscount2[numintplus] -= 1
+ self.crosscount2[numintplus-1] += 1
+ numint = self.level_in_interests[piece]
+ self.level_in_interests[piece] -= 1
+ if self.superseed:
+ numint = self.level_in_interests[piece]
+ self.level_in_interests[piece] -= 1
+ elif self.has[piece] or self.priority[piece] == -1:
+ return
+ self._shift_over(piece, self.interests[numint], self.interests[numint - 1])
+
+ def _shift_over(self, piece, l1, l2):
+ assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0)
+ parray = self.pos_in_interests
+ p = parray[piece]
+ assert l1[p] == piece
+ q = l1[-1]
+ l1[p] = q
+ parray[q] = p
+ del l1[-1]
+ newp = randrange(len(l2)+1)
+ if newp == len(l2):
+ parray[piece] = len(l2)
+ l2.append(piece)
+ else:
+ old = l2[newp]
+ parray[old] = len(l2)
+ l2.append(old)
+ l2[newp] = piece
+ parray[piece] = newp
+
+
+ def got_seed(self):
+ self.seeds_connected += 1
+ self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
+
+ def became_seed(self):
+ self.got_seed()
+ self.totalcount -= self.numpieces
+ self.numhaves = [i-1 for i in self.numhaves]
+ if self.superseed or not self.done:
+ self.level_in_interests = [i-1 for i in self.level_in_interests]
+ if self.interests:
+ del self.interests[0]
+ del self.crosscount[0]
+ if not self.done:
+ del self.crosscount2[0]
+
+ def lost_seed(self):
+ self.seeds_connected -= 1
+ self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
+
+
+ def requested(self, piece):
+ if piece not in self.started:
+ self.started.append(piece)
+
+ def _remove_from_interests(self, piece, keep_partial = False):
+ l = self.interests[self.level_in_interests[piece]]
+ p = self.pos_in_interests[piece]
+ assert l[p] == piece
+ q = l[-1]
+ l[p] = q
+ self.pos_in_interests[q] = p
+ del l[-1]
+ try:
+ self.started.remove(piece)
+ if keep_partial:
+ self.removed_partials[piece] = 1
+ except ValueError:
+ pass
+
+ def complete(self, piece):
+ assert not self.has[piece]
+ self.has[piece] = 1
+ self.numgot += 1
+ if self.numgot == self.numpieces:
+ self.done = True
+ self.crosscount2 = self.crosscount
+ else:
+ numhaves = self.numhaves[piece]
+ self.crosscount2[numhaves] -= 1
+ if numhaves+1 == len(self.crosscount2):
+ self.crosscount2.append(0)
+ self.crosscount2[numhaves+1] += 1
+ self._remove_from_interests(piece)
+
+
+ def next(self, haves, wantfunc, complete_first = False):
+ cutoff = self.numgot < self.rarest_first_cutoff
+ complete_first = (complete_first or cutoff) and not haves.complete()
+ best = None
+ bestnum = 2 ** 30
+ for i in self.started:
+ if haves[i] and wantfunc(i):
+ if self.level_in_interests[i] < bestnum:
+ best = i
+ bestnum = self.level_in_interests[i]
+ if best is not None:
+ if complete_first or (cutoff and len(self.interests) > self.cutoff):
+ return best
+ if haves.complete():
+ r = [ (0, min(bestnum,len(self.interests))) ]
+ elif cutoff and len(self.interests) > self.cutoff:
+ r = [ (self.cutoff, min(bestnum,len(self.interests))),
+ (0, self.cutoff) ]
+ else:
+ r = [ (0, min(bestnum,len(self.interests))) ]
+ for lo,hi in r:
+ for i in xrange(lo,hi):
+ for j in self.interests[i]:
+ if haves[j] and wantfunc(j):
+ return j
+ if best is not None:
+ return best
+ return None
+
+
+ def am_I_complete(self):
+ return self.done
+
+ def bump(self, piece):
+ l = self.interests[self.level_in_interests[piece]]
+ pos = self.pos_in_interests[piece]
+ del l[pos]
+ l.append(piece)
+ for i in range(pos,len(l)):
+ self.pos_in_interests[l[i]] = i
+ try:
+ self.started.remove(piece)
+ except:
+ pass
+
+ def set_priority(self, piece, p):
+ if self.superseed:
+ return False # don't muck with this if you're a superseed
+ oldp = self.priority[piece]
+ if oldp == p:
+ return False
+ self.priority[piece] = p
+ if p == -1:
+ # when setting priority -1,
+ # make sure to cancel any downloads for this piece
+ if not self.has[piece]:
+ self._remove_from_interests(piece, True)
+ return True
+ if oldp == -1:
+ level = self.numhaves[piece] + (self.priority_step * p)
+ self.level_in_interests[piece] = level
+ if self.has[piece]:
+ return True
+ while len(self.interests) < level+1:
+ self.interests.append([])
+ l2 = self.interests[level]
+ parray = self.pos_in_interests
+ newp = randrange(len(l2)+1)
+ if newp == len(l2):
+ parray[piece] = len(l2)
+ l2.append(piece)
+ else:
+ old = l2[newp]
+ parray[old] = len(l2)
+ l2.append(old)
+ l2[newp] = piece
+ parray[piece] = newp
+ if self.removed_partials.has_key(piece):
+ del self.removed_partials[piece]
+ self.started.append(piece)
+ # now go to downloader and try requesting more
+ return True
+ numint = self.level_in_interests[piece]
+ newint = numint + ((p - oldp) * self.priority_step)
+ self.level_in_interests[piece] = newint
+ if self.has[piece]:
+ return False
+ while len(self.interests) < newint+1:
+ self.interests.append([])
+ self._shift_over(piece, self.interests[numint], self.interests[newint])
+ return False
+
+ def is_blocked(self, piece):
+ return self.priority[piece] < 0
+
+
+ def set_superseed(self):
+ assert self.done
+ self.superseed = True
+ self.seed_got_haves = [0] * self.numpieces
+ self._init_interests() # assume everyone is disconnected
+
+ def next_have(self, connection, looser_upload):
+ if self.seed_time is None:
+ self.seed_time = clock()
+ return None
+ if clock() < self.seed_time+10: # wait 10 seconds after seeing the first peers
+ return None # to give time to grab have lists
+ if not connection.upload.super_seeding:
+ return None
+ olddl = self.seed_connections.get(connection)
+ if olddl is None:
+ ip = connection.get_ip()
+ olddl = self.past_ips.get(ip)
+ if olddl is not None: # peer reconnected
+ self.seed_connections[connection] = olddl
+ if olddl is not None:
+ if looser_upload:
+ num = 1 # send a new have even if it hasn't spread that piece elsewhere
+ else:
+ num = 2
+ if self.seed_got_haves[olddl] < num:
+ return None
+ if not connection.upload.was_ever_interested: # it never downloaded it?
+ connection.upload.skipped_count += 1
+ if connection.upload.skipped_count >= 3: # probably another stealthed seed
+ return -1 # signal to close it
+ for tier in self.interests:
+ for piece in tier:
+ if not connection.download.have[piece]:
+ seedint = self.level_in_interests[piece]
+ self.level_in_interests[piece] += 1 # tweak it up one, so you don't duplicate effort
+ if seedint == len(self.interests) - 1:
+ self.interests.append([])
+ self._shift_over(piece,
+ self.interests[seedint], self.interests[seedint + 1])
+ self.seed_got_haves[piece] = 0 # reset this
+ self.seed_connections[connection] = piece
+ connection.upload.seed_have_list.append(piece)
+ return piece
+ return -1 # something screwy; terminate connection
+
+ def lost_peer(self, connection):
+ olddl = self.seed_connections.get(connection)
+ if olddl is None:
+ return
+ del self.seed_connections[connection]
+ self.past_ips[connection.get_ip()] = olddl
+ if self.seed_got_haves[olddl] == 1:
+ self.seed_got_haves[olddl] = 0
diff --git a/BitTornado/BT1/Rerequester.py b/BitTornado/BT1/Rerequester.py
new file mode 100644
index 000000000..b6aa3cf5c
--- /dev/null
+++ b/BitTornado/BT1/Rerequester.py
@@ -0,0 +1,425 @@
+# Written by Bram Cohen
+# modified for multitracker operation by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado.zurllib import urlopen, quote
+from urlparse import urlparse, urlunparse
+from socket import gethostbyname
+from btformats import check_peers
+from BitTornado.bencode import bdecode
+from threading import Thread, Lock
+from cStringIO import StringIO
+from traceback import print_exc
+from socket import error, gethostbyname
+from random import shuffle
+from sha import sha
+from time import time
+try:
+ from os import getpid
+except ImportError:
+ def getpid():
+ return 1
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
+keys = {}
+basekeydata = str(getpid()) + repr(time()) + 'tracker'
+
+def add_key(tracker):
+ key = ''
+ for i in sha(basekeydata+tracker).digest()[-6:]:
+ key += mapbase64[ord(i) & 0x3F]
+ keys[tracker] = key
+
+def get_key(tracker):
+ try:
+ return "&key="+keys[tracker]
+ except:
+ add_key(tracker)
+ return "&key="+keys[tracker]
+
+class fakeflag:
+ def __init__(self, state=False):
+ self.state = state
+ def wait(self):
+ pass
+ def isSet(self):
+ return self.state
+
+class Rerequester:
+ def __init__(self, trackerlist, interval, sched, howmany, minpeers,
+ connect, externalsched, amount_left, up, down,
+ port, ip, myid, infohash, timeout, errorfunc, excfunc,
+ maxpeers, doneflag, upratefunc, downratefunc,
+ unpauseflag = fakeflag(True),
+ seed_id = '', seededfunc = None, force_rapid_update = False ):
+
+ self.excfunc = excfunc
+ newtrackerlist = []
+ for tier in trackerlist:
+ if len(tier)>1:
+ shuffle(tier)
+ newtrackerlist += [tier]
+ self.trackerlist = newtrackerlist
+ self.lastsuccessful = ''
+ self.rejectedmessage = 'rejected by tracker - '
+
+ self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
+ (quote(infohash), quote(myid), str(port)))
+ self.ip = ip
+ self.interval = interval
+ self.last = None
+ self.trackerid = None
+ self.announce_interval = 30 * 60
+ self.sched = sched
+ self.howmany = howmany
+ self.minpeers = minpeers
+ self.connect = connect
+ self.externalsched = externalsched
+ self.amount_left = amount_left
+ self.up = up
+ self.down = down
+ self.timeout = timeout
+ self.errorfunc = errorfunc
+ self.maxpeers = maxpeers
+ self.doneflag = doneflag
+ self.upratefunc = upratefunc
+ self.downratefunc = downratefunc
+ self.unpauseflag = unpauseflag
+ if seed_id:
+ self.url += '&seed_id='+quote(seed_id)
+ self.seededfunc = seededfunc
+ if seededfunc:
+ self.url += '&check_seeded=1'
+ self.force_rapid_update = force_rapid_update
+ self.last_failed = True
+ self.never_succeeded = True
+ self.errorcodes = {}
+ self.lock = SuccessLock()
+ self.special = None
+ self.stopped = False
+
+ def start(self):
+ self.sched(self.c, self.interval/2)
+ self.d(0)
+
+ def c(self):
+ if self.stopped:
+ return
+ if not self.unpauseflag.isSet() and (
+ self.howmany() < self.minpeers or self.force_rapid_update ):
+ self.announce(3, self._c)
+ else:
+ self._c()
+
+ def _c(self):
+ self.sched(self.c, self.interval)
+
+ def d(self, event = 3):
+ if self.stopped:
+ return
+ if not self.unpauseflag.isSet():
+ self._d()
+ return
+ self.announce(event, self._d)
+
+ def _d(self):
+ if self.never_succeeded:
+ self.sched(self.d, 60) # retry in 60 seconds
+ elif self.force_rapid_update:
+ return
+ else:
+ self.sched(self.d, self.announce_interval)
+
+
+ def hit(self, event = 3):
+ if not self.unpauseflag.isSet() and (
+ self.howmany() < self.minpeers or self.force_rapid_update ):
+ self.announce(event)
+
+ def announce(self, event = 3, callback = lambda: None, specialurl = None):
+
+ if specialurl is not None:
+ s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics
+ if self.howmany() >= self.maxpeers:
+ s += '&numwant=0'
+ else:
+ s += '&no_peer_id=1&compact=1'
+ self.last_failed = True # force true, so will display an error
+ self.special = specialurl
+ self.rerequest(s, callback)
+ return
+
+ else:
+ s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
+ (self.url, str(self.up()), str(self.down()),
+ str(self.amount_left())))
+ if self.last is not None:
+ s += '&last=' + quote(str(self.last))
+ if self.trackerid is not None:
+ s += '&trackerid=' + quote(str(self.trackerid))
+ if self.howmany() >= self.maxpeers:
+ s += '&numwant=0'
+ else:
+ s += '&no_peer_id=1&compact=1'
+ if event != 3:
+ s += '&event=' + ['started', 'completed', 'stopped'][event]
+ if event == 2:
+ self.stopped = True
+ self.rerequest(s, callback)
+
+
+ def snoop(self, peers, callback = lambda: None): # tracker call support
+ self.rerequest(self.url
+ +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
+ +str(peers), callback)
+
+
+ def rerequest(self, s, callback):
+ if not self.lock.isfinished(): # still waiting for prior cycle to complete??
+ def retry(self = self, s = s, callback = callback):
+ self.rerequest(s, callback)
+ self.sched(retry,5) # retry in 5 seconds
+ return
+ self.lock.reset()
+ rq = Thread(target = self._rerequest, args = [s, callback])
+ rq.setDaemon(False)
+ rq.start()
+
+ def _rerequest(self, s, callback):
+ try:
+ def fail (self = self, callback = callback):
+ self._fail(callback)
+ if self.ip:
+ try:
+ s += '&ip=' + gethostbyname(self.ip)
+ except:
+ self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
+ self.externalsched(fail)
+ self.errorcodes = {}
+ if self.special is None:
+ for t in range(len(self.trackerlist)):
+ for tr in range(len(self.trackerlist[t])):
+ tracker = self.trackerlist[t][tr]
+ if self.rerequest_single(tracker, s, callback):
+ if not self.last_failed and tr != 0:
+ del self.trackerlist[t][tr]
+ self.trackerlist[t] = [tracker] + self.trackerlist[t]
+ return
+ else:
+ tracker = self.special
+ self.special = None
+ if self.rerequest_single(tracker, s, callback):
+ return
+ # no success from any tracker
+ self.externalsched(fail)
+ except:
+ self.exception(callback)
+
+
+ def _fail(self, callback):
+ if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
+ or not self.amount_left() ):
+ for f in ['rejected', 'bad_data', 'troublecode']:
+ if self.errorcodes.has_key(f):
+ r = self.errorcodes[f]
+ break
+ else:
+ r = 'Problem connecting to tracker - unspecified error'
+ self.errorfunc(r)
+
+ self.last_failed = True
+ self.lock.give_up()
+ self.externalsched(callback)
+
+
+ def rerequest_single(self, t, s, callback):
+ l = self.lock.set()
+ rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
+ rq.setDaemon(False)
+ rq.start()
+ self.lock.wait()
+ if self.lock.success:
+ self.lastsuccessful = t
+ self.last_failed = False
+ self.never_succeeded = False
+ return True
+ if not self.last_failed and self.lastsuccessful == t:
+ # if the last tracker hit was successful, and you've just tried the tracker
+ # you'd contacted before, don't go any further, just fail silently.
+ self.last_failed = True
+ self.externalsched(callback)
+ self.lock.give_up()
+ return True
+ return False # returns true if it wants rerequest() to exit
+
+
+ def _rerequest_single(self, t, s, l, callback):
+ try:
+ closer = [None]
+ def timedout(self = self, l = l, closer = closer):
+ if self.lock.trip(l):
+ self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
+ self.lock.unwait(l)
+ try:
+ closer[0]()
+ except:
+ pass
+
+ self.externalsched(timedout, self.timeout)
+
+ err = None
+ try:
+ h = urlopen(t+s)
+ closer[0] = h.close
+ data = h.read()
+ except (IOError, error), e:
+ err = 'Problem connecting to tracker - ' + str(e)
+ except:
+ err = 'Problem connecting to tracker'
+ try:
+ h.close()
+ except:
+ pass
+ if err:
+ if self.lock.trip(l):
+ self.errorcodes['troublecode'] = err
+ self.lock.unwait(l)
+ return
+
+ if data == '':
+ if self.lock.trip(l):
+ self.errorcodes['troublecode'] = 'no data from tracker'
+ self.lock.unwait(l)
+ return
+
+ try:
+ r = bdecode(data, sloppy=1)
+ check_peers(r)
+ except ValueError, e:
+ if self.lock.trip(l):
+ self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
+ self.lock.unwait(l)
+ return
+
+ if r.has_key('failure reason'):
+ if self.lock.trip(l):
+ self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
+ self.lock.unwait(l)
+ return
+
+ if self.lock.trip(l, True): # success!
+ self.lock.unwait(l)
+ else:
+ callback = lambda: None # attempt timed out, don't do a callback
+
+ # even if the attempt timed out, go ahead and process data
+ def add(self = self, r = r, callback = callback):
+ self.postrequest(r, callback)
+ self.externalsched(add)
+ except:
+ self.exception(callback)
+
+
+ def postrequest(self, r, callback):
+ if r.has_key('warning message'):
+ self.errorfunc('warning from tracker - ' + r['warning message'])
+ self.announce_interval = r.get('interval', self.announce_interval)
+ self.interval = r.get('min interval', self.interval)
+ self.trackerid = r.get('tracker id', self.trackerid)
+ self.last = r.get('last')
+# ps = len(r['peers']) + self.howmany()
+ p = r['peers']
+ peers = []
+ if type(p) == type(''):
+ for x in xrange(0, len(p), 6):
+ ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
+ port = (ord(p[x+4]) << 8) | ord(p[x+5])
+ peers.append(((ip, port), 0))
+ else:
+ for x in p:
+ peers.append(((x['ip'].strip(), x['port']), x.get('peer id',0)))
+ ps = len(peers) + self.howmany()
+ if ps < self.maxpeers:
+ if self.doneflag.isSet():
+ if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
+ self.last = None
+ else:
+ if r.get('num peers', 1000) > ps * 1.2:
+ self.last = None
+ if self.seededfunc and r.get('seeded'):
+ self.seededfunc()
+ elif peers:
+ shuffle(peers)
+ self.connect(peers)
+ callback()
+
+ def exception(self, callback):
+ data = StringIO()
+ print_exc(file = data)
+ def r(s = data.getvalue(), callback = callback):
+ if self.excfunc:
+ self.excfunc(s)
+ else:
+ print s
+ callback()
+ self.externalsched(r)
+
+
+class SuccessLock:
+ def __init__(self):
+ self.lock = Lock()
+ self.pause = Lock()
+ self.code = 0L
+ self.success = False
+ self.finished = True
+
+ def reset(self):
+ self.success = False
+ self.finished = False
+
+ def set(self):
+ self.lock.acquire()
+ if not self.pause.locked():
+ self.pause.acquire()
+ self.first = True
+ self.code += 1L
+ self.lock.release()
+ return self.code
+
+ def trip(self, code, s = False):
+ self.lock.acquire()
+ try:
+ if code == self.code and not self.finished:
+ r = self.first
+ self.first = False
+ if s:
+ self.finished = True
+ self.success = True
+ return r
+ finally:
+ self.lock.release()
+
+ def give_up(self):
+ self.lock.acquire()
+ self.success = False
+ self.finished = True
+ self.lock.release()
+
+ def wait(self):
+ self.pause.acquire()
+
+ def unwait(self, code):
+ if code == self.code and self.pause.locked():
+ self.pause.release()
+
+ def isfinished(self):
+ self.lock.acquire()
+ x = self.finished
+ self.lock.release()
+ return x
diff --git a/BitTornado/BT1/Statistics.py b/BitTornado/BT1/Statistics.py
new file mode 100644
index 000000000..499907781
--- /dev/null
+++ b/BitTornado/BT1/Statistics.py
@@ -0,0 +1,177 @@
+# Written by Edward Keyes
+# see LICENSE.txt for license information
+
+from threading import Event
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+class Statistics_Response:
+ pass # empty class
+
+
+class Statistics:
+ def __init__(self, upmeasure, downmeasure, connecter, httpdl,
+ ratelimiter, rerequest_lastfailed, fdatflag):
+ self.upmeasure = upmeasure
+ self.downmeasure = downmeasure
+ self.connecter = connecter
+ self.httpdl = httpdl
+ self.ratelimiter = ratelimiter
+ self.downloader = connecter.downloader
+ self.picker = connecter.downloader.picker
+ self.storage = connecter.downloader.storage
+ self.torrentmeasure = connecter.downloader.totalmeasure
+ self.rerequest_lastfailed = rerequest_lastfailed
+ self.fdatflag = fdatflag
+ self.fdatactive = False
+ self.piecescomplete = None
+ self.placesopen = None
+ self.storage_totalpieces = len(self.storage.hashes)
+
+
+ def set_dirstats(self, files, piece_length):
+ self.piecescomplete = 0
+ self.placesopen = 0
+ self.filelistupdated = Event()
+ self.filelistupdated.set()
+ frange = xrange(len(files))
+ self.filepieces = [[] for x in frange]
+ self.filepieces2 = [[] for x in frange]
+ self.fileamtdone = [0.0 for x in frange]
+ self.filecomplete = [False for x in frange]
+ self.fileinplace = [False for x in frange]
+ start = 0L
+ for i in frange:
+ l = files[i][1]
+ if l == 0:
+ self.fileamtdone[i] = 1.0
+ self.filecomplete[i] = True
+ self.fileinplace[i] = True
+ else:
+ fp = self.filepieces[i]
+ fp2 = self.filepieces2[i]
+ for piece in range(int(start/piece_length),
+ int((start+l-1)/piece_length)+1):
+ fp.append(piece)
+ fp2.append(piece)
+ start += l
+
+
+ def update(self):
+ s = Statistics_Response()
+ s.upTotal = self.upmeasure.get_total()
+ s.downTotal = self.downmeasure.get_total()
+ s.last_failed = self.rerequest_lastfailed()
+ s.external_connection_made = self.connecter.external_connection_made
+ if s.downTotal > 0:
+ s.shareRating = float(s.upTotal)/s.downTotal
+ elif s.upTotal == 0:
+ s.shareRating = 0.0
+ else:
+ s.shareRating = -1.0
+ s.torrentRate = self.torrentmeasure.get_rate()
+ s.torrentTotal = self.torrentmeasure.get_total()
+ s.numSeeds = self.picker.seeds_connected
+ s.numOldSeeds = self.downloader.num_disconnected_seeds()
+ s.numPeers = len(self.downloader.downloads)-s.numSeeds
+ s.numCopies = 0.0
+ for i in self.picker.crosscount:
+ if i==0:
+ s.numCopies+=1
+ else:
+ s.numCopies+=1-float(i)/self.picker.numpieces
+ break
+ if self.picker.done:
+ s.numCopies2 = s.numCopies + 1
+ else:
+ s.numCopies2 = 0.0
+ for i in self.picker.crosscount2:
+ if i==0:
+ s.numCopies2+=1
+ else:
+ s.numCopies2+=1-float(i)/self.picker.numpieces
+ break
+ s.discarded = self.downloader.discarded
+ s.numSeeds += self.httpdl.seedsfound
+ s.numOldSeeds += self.httpdl.seedsfound
+ if s.numPeers == 0 or self.picker.numpieces == 0:
+ s.percentDone = 0.0
+ else:
+ s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers
+
+ s.backgroundallocating = self.storage.bgalloc_active
+ s.storage_totalpieces = len(self.storage.hashes)
+ s.storage_active = len(self.storage.stat_active)
+ s.storage_new = len(self.storage.stat_new)
+ s.storage_dirty = len(self.storage.dirty)
+ numdownloaded = self.storage.stat_numdownloaded
+ s.storage_justdownloaded = numdownloaded
+ s.storage_numcomplete = self.storage.stat_numfound + numdownloaded
+ s.storage_numflunked = self.storage.stat_numflunked
+ s.storage_isendgame = self.downloader.endgamemode
+
+ s.peers_kicked = self.downloader.kicked.items()
+ s.peers_banned = self.downloader.banned.items()
+
+ try:
+ s.upRate = int(self.ratelimiter.upload_rate/1000)
+ assert s.upRate < 5000
+ except:
+ s.upRate = 0
+ s.upSlots = self.ratelimiter.slots
+
+ if self.piecescomplete is None: # not a multi-file torrent
+ return s
+
+ if self.fdatflag.isSet():
+ if not self.fdatactive:
+ self.fdatactive = True
+ else:
+ self.fdatactive = False
+
+ if self.piecescomplete != self.picker.numgot:
+ for i in xrange(len(self.filecomplete)):
+ if self.filecomplete[i]:
+ continue
+ oldlist = self.filepieces[i]
+ newlist = [ piece
+ for piece in oldlist
+ if not self.storage.have[piece] ]
+ if len(newlist) != len(oldlist):
+ self.filepieces[i] = newlist
+ self.fileamtdone[i] = (
+ (len(self.filepieces2[i])-len(newlist))
+ /float(len(self.filepieces2[i])) )
+ if not newlist:
+ self.filecomplete[i] = True
+ self.filelistupdated.set()
+
+ self.piecescomplete = self.picker.numgot
+
+ if ( self.filelistupdated.isSet()
+ or self.placesopen != len(self.storage.places) ):
+ for i in xrange(len(self.filecomplete)):
+ if not self.filecomplete[i] or self.fileinplace[i]:
+ continue
+ while self.filepieces2[i]:
+ piece = self.filepieces2[i][-1]
+ if self.storage.places[piece] != piece:
+ break
+ del self.filepieces2[i][-1]
+ if not self.filepieces2[i]:
+ self.fileinplace[i] = True
+ self.storage.set_file_readonly(i)
+ self.filelistupdated.set()
+
+ self.placesopen = len(self.storage.places)
+
+ s.fileamtdone = self.fileamtdone
+ s.filecomplete = self.filecomplete
+ s.fileinplace = self.fileinplace
+ s.filelistupdated = self.filelistupdated
+
+ return s
+
diff --git a/BitTornado/BT1/Storage.py b/BitTornado/BT1/Storage.py
new file mode 100644
index 000000000..1ee9b5c81
--- /dev/null
+++ b/BitTornado/BT1/Storage.py
@@ -0,0 +1,584 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.piecebuffer import BufferPool
+from threading import Lock
+from time import time, strftime, localtime
+import os
+from os.path import exists, getsize, getmtime, basename
+from traceback import print_exc
+try:
+ from os import fsync
+except ImportError:
+ fsync = lambda x: None
+from bisect import bisect
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+MAXREADSIZE = 32768
+MAXLOCKSIZE = 1000000000L
+MAXLOCKRANGE = 3999999999L # only lock first 4 gig of file
+
+_pool = BufferPool()
+PieceBuffer = _pool.new
+
+def dummy_status(fractionDone = None, activity = None):
+ pass
+
+class Storage:
+ def __init__(self, files, piece_length, doneflag, config,
+ disabled_files = None):
+ # can raise IOError and ValueError
+ self.files = files
+ self.piece_length = piece_length
+ self.doneflag = doneflag
+ self.disabled = [False] * len(files)
+ self.file_ranges = []
+ self.disabled_ranges = []
+ self.working_ranges = []
+ numfiles = 0
+ total = 0l
+ so_far = 0l
+ self.handles = {}
+ self.whandles = {}
+ self.tops = {}
+ self.sizes = {}
+ self.mtimes = {}
+ if config.get('lock_files', True):
+ self.lock_file, self.unlock_file = self._lock_file, self._unlock_file
+ else:
+ self.lock_file, self.unlock_file = lambda x1,x2: None, lambda x1,x2: None
+ self.lock_while_reading = config.get('lock_while_reading', False)
+ self.lock = Lock()
+
+ if not disabled_files:
+ disabled_files = [False] * len(files)
+
+ for i in xrange(len(files)):
+ file, length = files[i]
+ if doneflag.isSet(): # bail out if doneflag is set
+ return
+ self.disabled_ranges.append(None)
+ if length == 0:
+ self.file_ranges.append(None)
+ self.working_ranges.append([])
+ else:
+ range = (total, total + length, 0, file)
+ self.file_ranges.append(range)
+ self.working_ranges.append([range])
+ numfiles += 1
+ total += length
+ if disabled_files[i]:
+ l = 0
+ else:
+ if exists(file):
+ l = getsize(file)
+ if l > length:
+ h = open(file, 'rb+')
+ h.truncate(length)
+ h.flush()
+ h.close()
+ l = length
+ else:
+ l = 0
+ h = open(file, 'wb+')
+ h.flush()
+ h.close()
+ self.mtimes[file] = getmtime(file)
+ self.tops[file] = l
+ self.sizes[file] = length
+ so_far += l
+
+ self.total_length = total
+ self._reset_ranges()
+
+ self.max_files_open = config['max_files_open']
+ if self.max_files_open > 0 and numfiles > self.max_files_open:
+ self.handlebuffer = []
+ else:
+ self.handlebuffer = None
+
+
+ if os.name == 'nt':
+ def _lock_file(self, name, f):
+ import msvcrt
+ for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
+ f.seek(p)
+ msvcrt.locking(f.fileno(), msvcrt.LK_LOCK,
+ min(MAXLOCKSIZE,self.sizes[name]-p))
+
+ def _unlock_file(self, name, f):
+ import msvcrt
+ for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
+ f.seek(p)
+ msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK,
+ min(MAXLOCKSIZE,self.sizes[name]-p))
+
+ elif os.name == 'posix':
+ def _lock_file(self, name, f):
+ import fcntl
+ fcntl.flock(f.fileno(), fcntl.LOCK_EX)
+
+ def _unlock_file(self, name, f):
+ import fcntl
+ fcntl.flock(f.fileno(), fcntl.LOCK_UN)
+
+ else:
+ def _lock_file(self, name, f):
+ pass
+ def _unlock_file(self, name, f):
+ pass
+
+
+ def was_preallocated(self, pos, length):
+ for file, begin, end in self._intervals(pos, length):
+ if self.tops.get(file, 0) < end:
+ return False
+ return True
+
+
+ def _sync(self, file):
+ self._close(file)
+ if self.handlebuffer:
+ self.handlebuffer.remove(file)
+
+ def sync(self):
+ # may raise IOError or OSError
+ for file in self.whandles.keys():
+ self._sync(file)
+
+
+ def set_readonly(self, f=None):
+ if f is None:
+ self.sync()
+ return
+ file = self.files[f][0]
+ if self.whandles.has_key(file):
+ self._sync(file)
+
+
+ def get_total_length(self):
+ return self.total_length
+
+
+ def _open(self, file, mode):
+ if self.mtimes.has_key(file):
+ try:
+ if self.handlebuffer is not None:
+ assert getsize(file) == self.tops[file]
+ newmtime = getmtime(file)
+ oldmtime = self.mtimes[file]
+ assert newmtime <= oldmtime+1
+ assert newmtime >= oldmtime-1
+ except:
+ if DEBUG:
+ print ( file+' modified: '
+ +strftime('(%x %X)',localtime(self.mtimes[file]))
+ +strftime(' != (%x %X) ?',localtime(getmtime(file))) )
+ raise IOError('modified during download')
+ try:
+ return open(file, mode)
+ except:
+ if DEBUG:
+ print_exc()
+ raise
+
+
+ def _close(self, file):
+ f = self.handles[file]
+ del self.handles[file]
+ if self.whandles.has_key(file):
+ del self.whandles[file]
+ f.flush()
+ self.unlock_file(file, f)
+ f.close()
+ self.tops[file] = getsize(file)
+ self.mtimes[file] = getmtime(file)
+ else:
+ if self.lock_while_reading:
+ self.unlock_file(file, f)
+ f.close()
+
+
+ def _close_file(self, file):
+ if not self.handles.has_key(file):
+ return
+ self._close(file)
+ if self.handlebuffer:
+ self.handlebuffer.remove(file)
+
+
+ def _get_file_handle(self, file, for_write):
+ if self.handles.has_key(file):
+ if for_write and not self.whandles.has_key(file):
+ self._close(file)
+ try:
+ f = self._open(file, 'rb+')
+ self.handles[file] = f
+ self.whandles[file] = 1
+ self.lock_file(file, f)
+ except (IOError, OSError), e:
+ if DEBUG:
+ print_exc()
+ raise IOError('unable to reopen '+file+': '+str(e))
+
+ if self.handlebuffer:
+ if self.handlebuffer[-1] != file:
+ self.handlebuffer.remove(file)
+ self.handlebuffer.append(file)
+ elif self.handlebuffer is not None:
+ self.handlebuffer.append(file)
+ else:
+ try:
+ if for_write:
+ f = self._open(file, 'rb+')
+ self.handles[file] = f
+ self.whandles[file] = 1
+ self.lock_file(file, f)
+ else:
+ f = self._open(file, 'rb')
+ self.handles[file] = f
+ if self.lock_while_reading:
+ self.lock_file(file, f)
+ except (IOError, OSError), e:
+ if DEBUG:
+ print_exc()
+ raise IOError('unable to open '+file+': '+str(e))
+
+ if self.handlebuffer is not None:
+ self.handlebuffer.append(file)
+ if len(self.handlebuffer) > self.max_files_open:
+ self._close(self.handlebuffer.pop(0))
+
+ return self.handles[file]
+
+
+ def _reset_ranges(self):
+ self.ranges = []
+ for l in self.working_ranges:
+ self.ranges.extend(l)
+ self.begins = [i[0] for i in self.ranges]
+
+ def _intervals(self, pos, amount):
+ r = []
+ stop = pos + amount
+ p = bisect(self.begins, pos) - 1
+ while p < len(self.ranges):
+ begin, end, offset, file = self.ranges[p]
+ if begin >= stop:
+ break
+ r.append(( file,
+ offset + max(pos, begin) - begin,
+ offset + min(end, stop) - begin ))
+ p += 1
+ return r
+
+
+ def read(self, pos, amount, flush_first = False):
+ r = PieceBuffer()
+ for file, pos, end in self._intervals(pos, amount):
+ if DEBUG:
+ print 'reading '+file+' from '+str(pos)+' to '+str(end)
+ self.lock.acquire()
+ h = self._get_file_handle(file, False)
+ if flush_first and self.whandles.has_key(file):
+ h.flush()
+ fsync(h)
+ h.seek(pos)
+ while pos < end:
+ length = min(end-pos, MAXREADSIZE)
+ data = h.read(length)
+ if len(data) != length:
+ raise IOError('error reading data from '+file)
+ r.append(data)
+ pos += length
+ self.lock.release()
+ return r
+
+ def write(self, pos, s):
+ # might raise an IOError
+ total = 0
+ for file, begin, end in self._intervals(pos, len(s)):
+ if DEBUG:
+ print 'writing '+file+' from '+str(pos)+' to '+str(end)
+ self.lock.acquire()
+ h = self._get_file_handle(file, True)
+ h.seek(begin)
+ h.write(s[total: total + end - begin])
+ self.lock.release()
+ total += end - begin
+
+ def top_off(self):
+ for begin, end, offset, file in self.ranges:
+ l = offset + end - begin
+ if l > self.tops.get(file, 0):
+ self.lock.acquire()
+ h = self._get_file_handle(file, True)
+ h.seek(l-1)
+ h.write(chr(0xFF))
+ self.lock.release()
+
+ def flush(self):
+ # may raise IOError or OSError
+ for file in self.whandles.keys():
+ self.lock.acquire()
+ self.handles[file].flush()
+ self.lock.release()
+
+ def close(self):
+ for file, f in self.handles.items():
+ try:
+ self.unlock_file(file, f)
+ except:
+ pass
+ try:
+ f.close()
+ except:
+ pass
+ self.handles = {}
+ self.whandles = {}
+ self.handlebuffer = None
+
+
+ def _get_disabled_ranges(self, f):
+ if not self.file_ranges[f]:
+ return ((),(),())
+ r = self.disabled_ranges[f]
+ if r:
+ return r
+ start, end, offset, file = self.file_ranges[f]
+ if DEBUG:
+ print 'calculating disabled range for '+self.files[f][0]
+ print 'bytes: '+str(start)+'-'+str(end)
+ print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1)
+ pieces = range( int(start/self.piece_length),
+ int((end-1)/self.piece_length)+1 )
+ offset = 0
+ disabled_files = []
+ if len(pieces) == 1:
+ if ( start % self.piece_length == 0
+ and end % self.piece_length == 0 ): # happens to be a single,
+ # perfect piece
+ working_range = [(start, end, offset, file)]
+ update_pieces = []
+ else:
+ midfile = os.path.join(self.bufferdir,str(f))
+ working_range = [(start, end, 0, midfile)]
+ disabled_files.append((midfile, start, end))
+ length = end - start
+ self.sizes[midfile] = length
+ piece = pieces[0]
+ update_pieces = [(piece, start-(piece*self.piece_length), length)]
+ else:
+ update_pieces = []
+ if start % self.piece_length != 0: # doesn't begin on an even piece boundary
+ end_b = pieces[1]*self.piece_length
+ startfile = os.path.join(self.bufferdir,str(f)+'b')
+ working_range_b = [ ( start, end_b, 0, startfile ) ]
+ disabled_files.append((startfile, start, end_b))
+ length = end_b - start
+ self.sizes[startfile] = length
+ offset = length
+ piece = pieces.pop(0)
+ update_pieces.append((piece, start-(piece*self.piece_length), length))
+ else:
+ working_range_b = []
+ if f != len(self.files)-1 and end % self.piece_length != 0:
+ # doesn't end on an even piece boundary
+ start_e = pieces[-1] * self.piece_length
+ endfile = os.path.join(self.bufferdir,str(f)+'e')
+ working_range_e = [ ( start_e, end, 0, endfile ) ]
+ disabled_files.append((endfile, start_e, end))
+ length = end - start_e
+ self.sizes[endfile] = length
+ piece = pieces.pop(-1)
+ update_pieces.append((piece, 0, length))
+ else:
+ working_range_e = []
+ if pieces:
+ working_range_m = [ ( pieces[0]*self.piece_length,
+ (pieces[-1]+1)*self.piece_length,
+ offset, file ) ]
+ else:
+ working_range_m = []
+ working_range = working_range_b + working_range_m + working_range_e
+
+ if DEBUG:
+ print str(working_range)
+ print str(update_pieces)
+ r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files))
+ self.disabled_ranges[f] = r
+ return r
+
+
+ def set_bufferdir(self, dir):
+ self.bufferdir = dir
+
+ def enable_file(self, f):
+ if not self.disabled[f]:
+ return
+ self.disabled[f] = False
+ r = self.file_ranges[f]
+ if not r:
+ return
+ file = r[3]
+ if not exists(file):
+ h = open(file, 'wb+')
+ h.flush()
+ h.close()
+ if not self.tops.has_key(file):
+ self.tops[file] = getsize(file)
+ if not self.mtimes.has_key(file):
+ self.mtimes[file] = getmtime(file)
+ self.working_ranges[f] = [r]
+
+ def disable_file(self, f):
+ if self.disabled[f]:
+ return
+ self.disabled[f] = True
+ r = self._get_disabled_ranges(f)
+ if not r:
+ return
+ for file, begin, end in r[2]:
+ if not os.path.isdir(self.bufferdir):
+ os.makedirs(self.bufferdir)
+ if not exists(file):
+ h = open(file, 'wb+')
+ h.flush()
+ h.close()
+ if not self.tops.has_key(file):
+ self.tops[file] = getsize(file)
+ if not self.mtimes.has_key(file):
+ self.mtimes[file] = getmtime(file)
+ self.working_ranges[f] = r[0]
+
+ reset_file_status = _reset_ranges
+
+
+ def get_piece_update_list(self, f):
+ return self._get_disabled_ranges(f)[1]
+
+
+ def delete_file(self, f):
+ try:
+ os.remove(self.files[f][0])
+ except:
+ pass
+
+
+ '''
+ Pickled data format:
+
+ d['files'] = [ file #, size, mtime {, file #, size, mtime...} ]
+                     File number within the torrent, plus the size and last
+                     modification time of each such file. Missing files are
+                     either empty or disabled.
+ d['partial files'] = [ name, size, mtime... ]
+ Names, sizes and last modification times of files containing
+ partial piece data. Filenames go by the following convention:
+ {file #, 0-based}{nothing, "b" or "e"}
+ eg: "0e" "3" "4b" "4e"
+ Where "b" specifies the partial data for the first piece in
+ the file, "e" the last piece, and no letter signifying that
+ the file is disabled but is smaller than one piece, and that
+ all the data is cached inside so adjacent files may be
+ verified.
+ '''
+ def pickle(self):
+ files = []
+ pfiles = []
+ for i in xrange(len(self.files)):
+ if not self.files[i][1]: # length == 0
+ continue
+ if self.disabled[i]:
+ for file, start, end in self._get_disabled_ranges(i)[2]:
+ pfiles.extend([basename(file),getsize(file),int(getmtime(file))])
+ continue
+ file = self.files[i][0]
+ files.extend([i,getsize(file),int(getmtime(file))])
+ return {'files': files, 'partial files': pfiles}
+
+
+ def unpickle(self, data):
+ # assume all previously-disabled files have already been disabled
+ try:
+ files = {}
+ pfiles = {}
+ l = data['files']
+ assert len(l) % 3 == 0
+ l = [l[x:x+3] for x in xrange(0,len(l),3)]
+ for f, size, mtime in l:
+ files[f] = (size, mtime)
+ l = data.get('partial files',[])
+ assert len(l) % 3 == 0
+ l = [l[x:x+3] for x in xrange(0,len(l),3)]
+ for file, size, mtime in l:
+ pfiles[file] = (size, mtime)
+
+ valid_pieces = {}
+ for i in xrange(len(self.files)):
+ if self.disabled[i]:
+ continue
+ r = self.file_ranges[i]
+ if not r:
+ continue
+ start, end, offset, file =r
+ if DEBUG:
+ print 'adding '+file
+ for p in xrange( int(start/self.piece_length),
+ int((end-1)/self.piece_length)+1 ):
+ valid_pieces[p] = 1
+
+ if DEBUG:
+ print valid_pieces.keys()
+
+ def test(old, size, mtime):
+ oldsize, oldmtime = old
+ if size != oldsize:
+ return False
+ if mtime > oldmtime+1:
+ return False
+ if mtime < oldmtime-1:
+ return False
+ return True
+
+ for i in xrange(len(self.files)):
+ if self.disabled[i]:
+ for file, start, end in self._get_disabled_ranges(i)[2]:
+ f1 = basename(file)
+ if ( not pfiles.has_key(f1)
+ or not test(pfiles[f1],getsize(file),getmtime(file)) ):
+ if DEBUG:
+ print 'removing '+file
+ for p in xrange( int(start/self.piece_length),
+ int((end-1)/self.piece_length)+1 ):
+ if valid_pieces.has_key(p):
+ del valid_pieces[p]
+ continue
+ file, size = self.files[i]
+ if not size:
+ continue
+ if ( not files.has_key(i)
+ or not test(files[i],getsize(file),getmtime(file)) ):
+ start, end, offset, file = self.file_ranges[i]
+ if DEBUG:
+ print 'removing '+file
+ for p in xrange( int(start/self.piece_length),
+ int((end-1)/self.piece_length)+1 ):
+ if valid_pieces.has_key(p):
+ del valid_pieces[p]
+ except:
+ if DEBUG:
+ print_exc()
+ return []
+
+ if DEBUG:
+ print valid_pieces.keys()
+ return valid_pieces.keys()
+
diff --git a/BitTornado/BT1/StorageWrapper.py b/BitTornado/BT1/StorageWrapper.py
new file mode 100644
index 000000000..8dfe6b1b1
--- /dev/null
+++ b/BitTornado/BT1/StorageWrapper.py
@@ -0,0 +1,1045 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.bitfield import Bitfield
+from sha import sha
+from BitTornado.clock import clock
+from traceback import print_exc
+from random import randrange
+try:
+ True
+except:
+ True = 1
+ False = 0
+try:
+ from bisect import insort
+except:
+ def insort(l, item):
+ l.append(item)
+ l.sort()
+
+DEBUG = False
+
+STATS_INTERVAL = 0.2
+
+def dummy_status(fractionDone = None, activity = None):
+ pass
+
+class Olist:
+ def __init__(self, l = []):
+ self.d = {}
+ for i in l:
+ self.d[i] = 1
+ def __len__(self):
+ return len(self.d)
+ def includes(self, i):
+ return self.d.has_key(i)
+ def add(self, i):
+ self.d[i] = 1
+ def extend(self, l):
+ for i in l:
+ self.d[i] = 1
+ def pop(self, n=0):
+ # assert self.d
+ k = self.d.keys()
+ if n == 0:
+ i = min(k)
+ elif n == -1:
+ i = max(k)
+ else:
+ k.sort()
+ i = k[n]
+ del self.d[i]
+ return i
+ def remove(self, i):
+ if self.d.has_key(i):
+ del self.d[i]
+
+class fakeflag:
+ def __init__(self, state=False):
+ self.state = state
+ def wait(self):
+ pass
+ def isSet(self):
+ return self.state
+
+
+class StorageWrapper:
+ def __init__(self, storage, request_size, hashes,
+ piece_size, finished, failed,
+ statusfunc = dummy_status, flag = fakeflag(), check_hashes = True,
+ data_flunked = lambda x: None, backfunc = None,
+ config = {}, unpauseflag = fakeflag(True) ):
+ self.storage = storage
+ self.request_size = long(request_size)
+ self.hashes = hashes
+ self.piece_size = long(piece_size)
+ self.piece_length = long(piece_size)
+ self.finished = finished
+ self.failed = failed
+ self.statusfunc = statusfunc
+ self.flag = flag
+ self.check_hashes = check_hashes
+ self.data_flunked = data_flunked
+ self.backfunc = backfunc
+ self.config = config
+ self.unpauseflag = unpauseflag
+
+ self.alloc_type = config.get('alloc_type','normal')
+ self.double_check = config.get('double_check', 0)
+ self.triple_check = config.get('triple_check', 0)
+ if self.triple_check:
+ self.double_check = True
+ self.bgalloc_enabled = False
+ self.bgalloc_active = False
+ self.total_length = storage.get_total_length()
+ self.amount_left = self.total_length
+ if self.total_length <= self.piece_size * (len(hashes) - 1):
+ raise ValueError, 'bad data in responsefile - total too small'
+ if self.total_length > self.piece_size * len(hashes):
+ raise ValueError, 'bad data in responsefile - total too big'
+ self.numactive = [0] * len(hashes)
+ self.inactive_requests = [1] * len(hashes)
+ self.amount_inactive = self.total_length
+ self.amount_obtained = 0
+ self.amount_desired = self.total_length
+ self.have = Bitfield(len(hashes))
+ self.have_cloaked_data = None
+ self.blocked = [False] * len(hashes)
+ self.blocked_holes = []
+ self.blocked_movein = Olist()
+ self.blocked_moveout = Olist()
+ self.waschecked = [False] * len(hashes)
+ self.places = {}
+ self.holes = []
+ self.stat_active = {}
+ self.stat_new = {}
+ self.dirty = {}
+ self.stat_numflunked = 0
+ self.stat_numdownloaded = 0
+ self.stat_numfound = 0
+ self.download_history = {}
+ self.failed_pieces = {}
+ self.out_of_place = 0
+ self.write_buf_max = config['write_buffer_size']*1048576L
+ self.write_buf_size = 0L
+ self.write_buf = {} # structure: piece: [(start, data), ...]
+ self.write_buf_list = []
+
+ self.initialize_tasks = [
+ ['checking existing data', 0, self.init_hashcheck, self.hashcheckfunc],
+ ['moving data', 1, self.init_movedata, self.movedatafunc],
+ ['allocating disk space', 1, self.init_alloc, self.allocfunc] ]
+
+ self.backfunc(self._bgalloc,0.1)
+ self.backfunc(self._bgsync,max(self.config['auto_flush']*60,60))
+
+ def _bgsync(self):
+ if self.config['auto_flush']:
+ self.sync()
+ self.backfunc(self._bgsync,max(self.config['auto_flush']*60,60))
+
+
+ def old_style_init(self):
+ while self.initialize_tasks:
+ msg, done, init, next = self.initialize_tasks.pop(0)
+ if init():
+ self.statusfunc(activity = msg, fractionDone = done)
+ t = clock() + STATS_INTERVAL
+ x = 0
+ while x is not None:
+ if t < clock():
+ t = clock() + STATS_INTERVAL
+ self.statusfunc(fractionDone = x)
+ self.unpauseflag.wait()
+ if self.flag.isSet():
+ return False
+ x = next()
+
+ self.statusfunc(fractionDone = 0)
+ return True
+
+
+ def initialize(self, donefunc, statusfunc = None):
+ self.initialize_done = donefunc
+ if statusfunc is None:
+ statusfunc = self.statusfunc
+ self.initialize_status = statusfunc
+ self.initialize_next = None
+
+ self.backfunc(self._initialize)
+
+ def _initialize(self):
+ if not self.unpauseflag.isSet():
+ self.backfunc(self._initialize, 1)
+ return
+
+ if self.initialize_next:
+ x = self.initialize_next()
+ if x is None:
+ self.initialize_next = None
+ else:
+ self.initialize_status(fractionDone = x)
+ else:
+ if not self.initialize_tasks:
+ self.initialize_done()
+ return
+ msg, done, init, next = self.initialize_tasks.pop(0)
+ if init():
+ self.initialize_status(activity = msg, fractionDone = done)
+ self.initialize_next = next
+
+ self.backfunc(self._initialize)
+
+
+ def init_hashcheck(self):
+ if self.flag.isSet():
+ return False
+ self.check_list = []
+ if len(self.hashes) == 0 or self.amount_left == 0:
+ self.check_total = 0
+ self.finished()
+ return False
+
+ self.check_targets = {}
+ got = {}
+ for p,v in self.places.items():
+ assert not got.has_key(v)
+ got[v] = 1
+ for i in xrange(len(self.hashes)):
+            if self.places.has_key(i):  # restored from pickled state
+ self.check_targets[self.hashes[i]] = []
+ if self.places[i] == i:
+ continue
+ else:
+ assert not got.has_key(i)
+ self.out_of_place += 1
+ if got.has_key(i):
+ continue
+ if self._waspre(i):
+ if self.blocked[i]:
+ self.places[i] = i
+ else:
+ self.check_list.append(i)
+ continue
+ if not self.check_hashes:
+ self.failed('told file complete on start-up, but data is missing')
+ return False
+ self.holes.append(i)
+ if self.blocked[i] or self.check_targets.has_key(self.hashes[i]):
+ self.check_targets[self.hashes[i]] = [] # in case of a hash collision, discard
+ else:
+ self.check_targets[self.hashes[i]] = [i]
+ self.check_total = len(self.check_list)
+ self.check_numchecked = 0.0
+ self.lastlen = self._piecelen(len(self.hashes) - 1)
+ self.numchecked = 0.0
+ return self.check_total > 0
+
+ def _markgot(self, piece, pos):
+ if DEBUG:
+ print str(piece)+' at '+str(pos)
+ self.places[piece] = pos
+ self.have[piece] = True
+ len = self._piecelen(piece)
+ self.amount_obtained += len
+ self.amount_left -= len
+ self.amount_inactive -= len
+ self.inactive_requests[piece] = None
+ self.waschecked[piece] = self.check_hashes
+ self.stat_numfound += 1
+
+ def hashcheckfunc(self):
+ if self.flag.isSet():
+ return None
+ if not self.check_list:
+ return None
+
+ i = self.check_list.pop(0)
+ if not self.check_hashes:
+ self._markgot(i, i)
+ else:
+ d1 = self.read_raw(i,0,self.lastlen)
+ if d1 is None:
+ return None
+ sh = sha(d1[:])
+ d1.release()
+ sp = sh.digest()
+ d2 = self.read_raw(i,self.lastlen,self._piecelen(i)-self.lastlen)
+ if d2 is None:
+ return None
+ sh.update(d2[:])
+ d2.release()
+ s = sh.digest()
+ if s == self.hashes[i]:
+ self._markgot(i, i)
+ elif ( self.check_targets.get(s)
+ and self._piecelen(i) == self._piecelen(self.check_targets[s][-1]) ):
+ self._markgot(self.check_targets[s].pop(), i)
+ self.out_of_place += 1
+ elif ( not self.have[-1] and sp == self.hashes[-1]
+ and (i == len(self.hashes) - 1
+ or not self._waspre(len(self.hashes) - 1)) ):
+ self._markgot(len(self.hashes) - 1, i)
+ self.out_of_place += 1
+ else:
+ self.places[i] = i
+ self.numchecked += 1
+ if self.amount_left == 0:
+ self.finished()
+ return (self.numchecked / self.check_total)
+
+
+ def init_movedata(self):
+ if self.flag.isSet():
+ return False
+ if self.alloc_type != 'sparse':
+ return False
+ self.storage.top_off() # sets file lengths to their final size
+ self.movelist = []
+ if self.out_of_place == 0:
+ for i in self.holes:
+ self.places[i] = i
+ self.holes = []
+ return False
+ self.tomove = float(self.out_of_place)
+ for i in xrange(len(self.hashes)):
+ if not self.places.has_key(i):
+ self.places[i] = i
+ elif self.places[i] != i:
+ self.movelist.append(i)
+ self.holes = []
+ return True
+
+ def movedatafunc(self):
+ if self.flag.isSet():
+ return None
+ if not self.movelist:
+ return None
+ i = self.movelist.pop(0)
+ old = self.read_raw(self.places[i], 0, self._piecelen(i))
+ if old is None:
+ return None
+ if not self.write_raw(i, 0, old):
+ return None
+ if self.double_check and self.have[i]:
+ if self.triple_check:
+ old.release()
+ old = self.read_raw( i, 0, self._piecelen(i),
+ flush_first = True )
+ if old is None:
+ return None
+ if sha(old[:]).digest() != self.hashes[i]:
+ self.failed('download corrupted; please restart and resume')
+ return None
+ old.release()
+
+ self.places[i] = i
+ self.tomove -= 1
+ return (self.tomove / self.out_of_place)
+
+
+ def init_alloc(self):
+ if self.flag.isSet():
+ return False
+ if not self.holes:
+ return False
+ self.numholes = float(len(self.holes))
+ self.alloc_buf = chr(0xFF) * self.piece_size
+ if self.alloc_type == 'pre-allocate':
+ self.bgalloc_enabled = True
+ return True
+ if self.alloc_type == 'background':
+ self.bgalloc_enabled = True
+ if self.blocked_moveout:
+ return True
+ return False
+
+
+ def _allocfunc(self):
+ while self.holes:
+ n = self.holes.pop(0)
+ if self.blocked[n]: # assume not self.blocked[index]
+ if not self.blocked_movein:
+ self.blocked_holes.append(n)
+ continue
+ if not self.places.has_key(n):
+ b = self.blocked_movein.pop(0)
+ oldpos = self._move_piece(b, n)
+ self.places[oldpos] = oldpos
+ return None
+ if self.places.has_key(n):
+ oldpos = self._move_piece(n, n)
+ self.places[oldpos] = oldpos
+ return None
+ return n
+ return None
+
+ def allocfunc(self):
+ if self.flag.isSet():
+ return None
+
+ if self.blocked_moveout:
+ self.bgalloc_active = True
+ n = self._allocfunc()
+ if n is not None:
+ if self.blocked_moveout.includes(n):
+ self.blocked_moveout.remove(n)
+ b = n
+ else:
+ b = self.blocked_moveout.pop(0)
+ oldpos = self._move_piece(b,n)
+ self.places[oldpos] = oldpos
+ return len(self.holes) / self.numholes
+
+ if self.holes and self.bgalloc_enabled:
+ self.bgalloc_active = True
+ n = self._allocfunc()
+ if n is not None:
+ self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+ self.places[n] = n
+ return len(self.holes) / self.numholes
+
+ self.bgalloc_active = False
+ return None
+
+ def bgalloc(self):
+ if self.bgalloc_enabled:
+ if not self.holes and not self.blocked_moveout and self.backfunc:
+ self.backfunc(self.storage.flush)
+ # force a flush whenever the "finish allocation" button is hit
+ self.bgalloc_enabled = True
+ return False
+
+ def _bgalloc(self):
+ self.allocfunc()
+ if self.config.get('alloc_rate',0) < 0.1:
+ self.config['alloc_rate'] = 0.1
+ self.backfunc( self._bgalloc,
+ float(self.piece_size)/(self.config['alloc_rate']*1048576) )
+
+
+ def _waspre(self, piece):
+ return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece))
+
+ def _piecelen(self, piece):
+ if piece < len(self.hashes) - 1:
+ return self.piece_size
+ else:
+ return self.total_length - (piece * self.piece_size)
+
+ def get_amount_left(self):
+ return self.amount_left
+
+ def do_I_have_anything(self):
+ return self.amount_left < self.total_length
+
+ def _make_inactive(self, index):
+ length = self._piecelen(index)
+ l = []
+ x = 0
+ while x + self.request_size < length:
+ l.append((x, self.request_size))
+ x += self.request_size
+ l.append((x, length - x))
+ self.inactive_requests[index] = l
+
+ def is_endgame(self):
+ return not self.amount_inactive
+
+ def am_I_complete(self):
+ return self.amount_obtained == self.amount_desired
+
+ def reset_endgame(self, requestlist):
+ for index, begin, length in requestlist:
+ self.request_lost(index, begin, length)
+
+ def get_have_list(self):
+ return self.have.tostring()
+
+ def get_have_list_cloaked(self):
+ if self.have_cloaked_data is None:
+ newhave = Bitfield(copyfrom = self.have)
+ unhaves = []
+ n = min(randrange(2,5),len(self.hashes)) # between 2-4 unless torrent is small
+ while len(unhaves) < n:
+ unhave = randrange(min(32,len(self.hashes))) # all in first 4 bytes
+ if not unhave in unhaves:
+ unhaves.append(unhave)
+ newhave[unhave] = False
+ self.have_cloaked_data = (newhave.tostring(), unhaves)
+ return self.have_cloaked_data
+
+ def do_I_have(self, index):
+ return self.have[index]
+
+ def do_I_have_requests(self, index):
+ return not not self.inactive_requests[index]
+
+ def is_unstarted(self, index):
+ return ( not self.have[index] and not self.numactive[index]
+ and not self.dirty.has_key(index) )
+
+ def get_hash(self, index):
+ return self.hashes[index]
+
+ def get_stats(self):
+ return self.amount_obtained, self.amount_desired
+
+ def new_request(self, index):
+ # returns (begin, length)
+ if self.inactive_requests[index] == 1:
+ self._make_inactive(index)
+ self.numactive[index] += 1
+ self.stat_active[index] = 1
+ if not self.dirty.has_key(index):
+ self.stat_new[index] = 1
+ rs = self.inactive_requests[index]
+# r = min(rs)
+# rs.remove(r)
+ r = rs.pop(0)
+ self.amount_inactive -= r[1]
+ return r
+
+
+ def write_raw(self, index, begin, data):
+ try:
+ self.storage.write(self.piece_size * index + begin, data)
+ return True
+ except IOError, e:
+ self.failed('IO Error: ' + str(e))
+ return False
+
+
+ def _write_to_buffer(self, piece, start, data):
+ if not self.write_buf_max:
+ return self.write_raw(self.places[piece], start, data)
+ self.write_buf_size += len(data)
+ while self.write_buf_size > self.write_buf_max:
+ old = self.write_buf_list.pop(0)
+ if not self._flush_buffer(old, True):
+ return False
+ if self.write_buf.has_key(piece):
+ self.write_buf_list.remove(piece)
+ else:
+ self.write_buf[piece] = []
+ self.write_buf_list.append(piece)
+ self.write_buf[piece].append((start,data))
+ return True
+
+ def _flush_buffer(self, piece, popped = False):
+ if not self.write_buf.has_key(piece):
+ return True
+ if not popped:
+ self.write_buf_list.remove(piece)
+ l = self.write_buf[piece]
+ del self.write_buf[piece]
+ l.sort()
+ for start, data in l:
+ self.write_buf_size -= len(data)
+ if not self.write_raw(self.places[piece], start, data):
+ return False
+ return True
+
+ def sync(self):
+ spots = {}
+ for p in self.write_buf_list:
+ spots[self.places[p]] = p
+ l = spots.keys()
+ l.sort()
+ for i in l:
+ try:
+ self._flush_buffer(spots[i])
+ except:
+ pass
+ try:
+ self.storage.sync()
+ except IOError, e:
+ self.failed('IO Error: ' + str(e))
+ except OSError, e:
+ self.failed('OS Error: ' + str(e))
+
+
+ def _move_piece(self, index, newpos):
+ oldpos = self.places[index]
+ if DEBUG:
+ print 'moving '+str(index)+' from '+str(oldpos)+' to '+str(newpos)
+ assert oldpos != index
+ assert oldpos != newpos
+ assert index == newpos or not self.places.has_key(newpos)
+ old = self.read_raw(oldpos, 0, self._piecelen(index))
+ if old is None:
+ return -1
+ if not self.write_raw(newpos, 0, old):
+ return -1
+ self.places[index] = newpos
+ if self.have[index] and (
+ self.triple_check or (self.double_check and index == newpos) ):
+ if self.triple_check:
+ old.release()
+ old = self.read_raw(newpos, 0, self._piecelen(index),
+ flush_first = True)
+ if old is None:
+ return -1
+ if sha(old[:]).digest() != self.hashes[index]:
+ self.failed('download corrupted; please restart and resume')
+ return -1
+ old.release()
+
+ if self.blocked[index]:
+ self.blocked_moveout.remove(index)
+ if self.blocked[newpos]:
+ self.blocked_movein.remove(index)
+ else:
+ self.blocked_movein.add(index)
+ else:
+ self.blocked_movein.remove(index)
+ if self.blocked[newpos]:
+ self.blocked_moveout.add(index)
+ else:
+ self.blocked_moveout.remove(index)
+
+ return oldpos
+
+ def _clear_space(self, index):
+ h = self.holes.pop(0)
+ n = h
+ if self.blocked[n]: # assume not self.blocked[index]
+ if not self.blocked_movein:
+ self.blocked_holes.append(n)
+ return True # repeat
+ if not self.places.has_key(n):
+ b = self.blocked_movein.pop(0)
+ oldpos = self._move_piece(b, n)
+ if oldpos < 0:
+ return False
+ n = oldpos
+ if self.places.has_key(n):
+ oldpos = self._move_piece(n, n)
+ if oldpos < 0:
+ return False
+ n = oldpos
+ if index == n or index in self.holes:
+ if n == h:
+ self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+ self.places[index] = n
+ if self.blocked[n]:
+ # because n may be a spot cleared 10 lines above, it's possible
+ # for it to be blocked. While that spot could be left cleared
+ # and a new spot allocated, this condition might occur several
+ # times in a row, resulting in a significant amount of disk I/O,
+ # delaying the operation of the engine. Rather than do this,
+ # queue the piece to be moved out again, which will be performed
+ # by the background allocator, with which data movement is
+ # automatically limited.
+ self.blocked_moveout.add(index)
+ return False
+ for p, v in self.places.items():
+ if v == index:
+ break
+ else:
+ self.failed('download corrupted; please restart and resume')
+ return False
+ self._move_piece(p, n)
+ self.places[index] = index
+ return False
+
+
+ def piece_came_in(self, index, begin, piece, source = None):
+ assert not self.have[index]
+
+ if not self.places.has_key(index):
+ while self._clear_space(index):
+ pass
+ if DEBUG:
+ print 'new place for '+str(index)+' at '+str(self.places[index])
+ if self.flag.isSet():
+ return
+
+ if self.failed_pieces.has_key(index):
+ old = self.read_raw(self.places[index], begin, len(piece))
+ if old is None:
+ return True
+ if old[:].tostring() != piece:
+ try:
+ self.failed_pieces[index][self.download_history[index][begin]] = 1
+ except:
+ self.failed_pieces[index][None] = 1
+ old.release()
+ self.download_history.setdefault(index,{})[begin] = source
+
+ if not self._write_to_buffer(index, begin, piece):
+ return True
+
+ self.amount_obtained += len(piece)
+ self.dirty.setdefault(index,[]).append((begin, len(piece)))
+ self.numactive[index] -= 1
+ assert self.numactive[index] >= 0
+ if not self.numactive[index]:
+ del self.stat_active[index]
+ if self.stat_new.has_key(index):
+ del self.stat_new[index]
+
+ if self.inactive_requests[index] or self.numactive[index]:
+ return True
+
+ del self.dirty[index]
+ if not self._flush_buffer(index):
+ return True
+ length = self._piecelen(index)
+ data = self.read_raw(self.places[index], 0, length,
+ flush_first = self.triple_check)
+ if data is None:
+ return True
+ hash = sha(data[:]).digest()
+ data.release()
+ if hash != self.hashes[index]:
+
+ self.amount_obtained -= length
+ self.data_flunked(length, index)
+ self.inactive_requests[index] = 1
+ self.amount_inactive += length
+ self.stat_numflunked += 1
+
+ self.failed_pieces[index] = {}
+ allsenders = {}
+ for d in self.download_history[index].values():
+ allsenders[d] = 1
+ if len(allsenders) == 1:
+ culprit = allsenders.keys()[0]
+ if culprit is not None:
+ culprit.failed(index, bump = True)
+ del self.failed_pieces[index] # found the culprit already
+
+ return False
+
+ self.have[index] = True
+ self.inactive_requests[index] = None
+ self.waschecked[index] = True
+ self.amount_left -= length
+ self.stat_numdownloaded += 1
+
+ for d in self.download_history[index].values():
+ if d is not None:
+ d.good(index)
+ del self.download_history[index]
+ if self.failed_pieces.has_key(index):
+ for d in self.failed_pieces[index].keys():
+ if d is not None:
+ d.failed(index)
+ del self.failed_pieces[index]
+
+ if self.amount_left == 0:
+ self.finished()
+ return True
+
+
+ def request_lost(self, index, begin, length):
+ assert not (begin, length) in self.inactive_requests[index]
+ insort(self.inactive_requests[index], (begin, length))
+ self.amount_inactive += length
+ self.numactive[index] -= 1
+ if not self.numactive[index]:
+ del self.stat_active[index]
+ if self.stat_new.has_key(index):
+ del self.stat_new[index]
+
+
+ def get_piece(self, index, begin, length):
+ if not self.have[index]:
+ return None
+ data = None
+ if not self.waschecked[index]:
+ data = self.read_raw(self.places[index], 0, self._piecelen(index))
+ if data is None:
+ return None
+ if sha(data[:]).digest() != self.hashes[index]:
+ self.failed('told file complete on start-up, but piece failed hash check')
+ return None
+ self.waschecked[index] = True
+ if length == -1 and begin == 0:
+ return data # optimization
+ if length == -1:
+ if begin > self._piecelen(index):
+ return None
+ length = self._piecelen(index)-begin
+ if begin == 0:
+ return self.read_raw(self.places[index], 0, length)
+ elif begin + length > self._piecelen(index):
+ return None
+ if data is not None:
+ s = data[begin:begin+length]
+ data.release()
+ return s
+ data = self.read_raw(self.places[index], begin, length)
+ if data is None:
+ return None
+ s = data.getarray()
+ data.release()
+ return s
+
+ def read_raw(self, piece, begin, length, flush_first = False):
+ try:
+ return self.storage.read(self.piece_size * piece + begin,
+ length, flush_first)
+ except IOError, e:
+ self.failed('IO Error: ' + str(e))
+ return None
+
+
+ def set_file_readonly(self, n):
+ try:
+ self.storage.set_readonly(n)
+ except IOError, e:
+ self.failed('IO Error: ' + str(e))
+ except OSError, e:
+ self.failed('OS Error: ' + str(e))
+
+
+ def has_data(self, index):
+ return index not in self.holes and index not in self.blocked_holes
+
+ def doublecheck_data(self, pieces_to_check):
+ if not self.double_check:
+ return
+ sources = []
+ for p,v in self.places.items():
+ if pieces_to_check.has_key(v):
+ sources.append(p)
+ assert len(sources) == len(pieces_to_check)
+ sources.sort()
+ for index in sources:
+ if self.have[index]:
+ piece = self.read_raw(self.places[index],0,self._piecelen(index),
+ flush_first = True )
+ if piece is None:
+ return False
+ if sha(piece[:]).digest() != self.hashes[index]:
+ self.failed('download corrupted; please restart and resume')
+ return False
+ piece.release()
+ return True
+
+
+ def reblock(self, new_blocked):
+ # assume downloads have already been canceled and chunks made inactive
+ for i in xrange(len(new_blocked)):
+ if new_blocked[i] and not self.blocked[i]:
+ length = self._piecelen(i)
+ self.amount_desired -= length
+ if self.have[i]:
+ self.amount_obtained -= length
+ continue
+ if self.inactive_requests[i] == 1:
+ self.amount_inactive -= length
+ continue
+ inactive = 0
+ for nb, nl in self.inactive_requests[i]:
+ inactive += nl
+ self.amount_inactive -= inactive
+ self.amount_obtained -= length - inactive
+
+ if self.blocked[i] and not new_blocked[i]:
+ length = self._piecelen(i)
+ self.amount_desired += length
+ if self.have[i]:
+ self.amount_obtained += length
+ continue
+ if self.inactive_requests[i] == 1:
+ self.amount_inactive += length
+ continue
+ inactive = 0
+ for nb, nl in self.inactive_requests[i]:
+ inactive += nl
+ self.amount_inactive += inactive
+ self.amount_obtained += length - inactive
+
+ self.blocked = new_blocked
+
+ self.blocked_movein = Olist()
+ self.blocked_moveout = Olist()
+ for p,v in self.places.items():
+ if p != v:
+ if self.blocked[p] and not self.blocked[v]:
+ self.blocked_movein.add(p)
+ elif self.blocked[v] and not self.blocked[p]:
+ self.blocked_moveout.add(p)
+
+ self.holes.extend(self.blocked_holes) # reset holes list
+ self.holes.sort()
+ self.blocked_holes = []
+
+
+ '''
+ Pickled data format:
+
+ d['pieces'] = either a string containing a bitfield of complete pieces,
+ or the numeric value "1" signifying a seed. If it is
+ a seed, d['places'] and d['partials'] should be empty
+ and needn't even exist.
+ d['partials'] = [ piece, [ offset, length... ]... ]
+ a list of partial data that had been previously
+ downloaded, plus the given offsets. Adjacent partials
+ are merged so as to save space, and so that if the
+ request size changes then new requests can be
+ calculated more efficiently.
+    d['places'] = [ piece, place {, piece, place ...} ]
+ the piece index, and the place it's stored.
+ If d['pieces'] specifies a complete piece or d['partials']
+ specifies a set of partials for a piece which has no
+ entry in d['places'], it can be assumed that
+ place[index] = index. A place specified with no
+ corresponding data in d['pieces'] or d['partials']
+ indicates allocated space with no valid data, and is
+ reserved so it doesn't need to be hash-checked.
+ '''
+ def pickle(self):
+ if self.have.complete():
+ return {'pieces': 1}
+ pieces = Bitfield(len(self.hashes))
+ places = []
+ partials = []
+ for p in xrange(len(self.hashes)):
+ if self.blocked[p] or not self.places.has_key(p):
+ continue
+ h = self.have[p]
+ pieces[p] = h
+ pp = self.dirty.get(p)
+ if not h and not pp: # no data
+ places.extend([self.places[p],self.places[p]])
+ elif self.places[p] != p:
+ places.extend([p, self.places[p]])
+ if h or not pp:
+ continue
+ pp.sort()
+ r = []
+ while len(pp) > 1:
+ if pp[0][0]+pp[0][1] == pp[1][0]:
+ pp[0] = list(pp[0])
+ pp[0][1] += pp[1][1]
+ del pp[1]
+ else:
+ r.extend(pp[0])
+ del pp[0]
+ r.extend(pp[0])
+ partials.extend([p,r])
+ return {'pieces': pieces.tostring(), 'places': places, 'partials': partials}
+
+
+ def unpickle(self, data, valid_places):
+ got = {}
+ places = {}
+ dirty = {}
+ download_history = {}
+ stat_active = {}
+ stat_numfound = self.stat_numfound
+ amount_obtained = self.amount_obtained
+ amount_inactive = self.amount_inactive
+ amount_left = self.amount_left
+ inactive_requests = [x for x in self.inactive_requests]
+ restored_partials = []
+
+ try:
+ if data['pieces'] == 1: # a seed
+ assert not data.get('places',None)
+ assert not data.get('partials',None)
+ have = Bitfield(len(self.hashes))
+ for i in xrange(len(self.hashes)):
+ have[i] = True
+ assert have.complete()
+ _places = []
+ _partials = []
+ else:
+ have = Bitfield(len(self.hashes), data['pieces'])
+ _places = data['places']
+ assert len(_places) % 2 == 0
+ _places = [_places[x:x+2] for x in xrange(0,len(_places),2)]
+ _partials = data['partials']
+ assert len(_partials) % 2 == 0
+ _partials = [_partials[x:x+2] for x in xrange(0,len(_partials),2)]
+
+ for index, place in _places:
+ if place not in valid_places:
+ continue
+ assert not got.has_key(index)
+ assert not got.has_key(place)
+ places[index] = place
+ got[index] = 1
+ got[place] = 1
+
+ for index in xrange(len(self.hashes)):
+ if have[index]:
+ if not places.has_key(index):
+ if index not in valid_places:
+ have[index] = False
+ continue
+ assert not got.has_key(index)
+ places[index] = index
+ got[index] = 1
+ length = self._piecelen(index)
+ amount_obtained += length
+ stat_numfound += 1
+ amount_inactive -= length
+ amount_left -= length
+ inactive_requests[index] = None
+
+ for index, plist in _partials:
+ assert not dirty.has_key(index)
+ assert not have[index]
+ if not places.has_key(index):
+ if index not in valid_places:
+ continue
+ assert not got.has_key(index)
+ places[index] = index
+ got[index] = 1
+ assert len(plist) % 2 == 0
+ plist = [plist[x:x+2] for x in xrange(0,len(plist),2)]
+ dirty[index] = plist
+ stat_active[index] = 1
+ download_history[index] = {}
+ # invert given partials
+ length = self._piecelen(index)
+ l = []
+ if plist[0][0] > 0:
+ l.append((0,plist[0][0]))
+ for i in xrange(len(plist)-1):
+ end = plist[i][0]+plist[i][1]
+ assert not end > plist[i+1][0]
+ l.append((end,plist[i+1][0]-end))
+ end = plist[-1][0]+plist[-1][1]
+ assert not end > length
+ if end < length:
+ l.append((end,length-end))
+ # split them to request_size
+ ll = []
+ amount_obtained += length
+ amount_inactive -= length
+ for nb, nl in l:
+ while nl > 0:
+ r = min(nl,self.request_size)
+ ll.append((nb,r))
+ amount_inactive += r
+ amount_obtained -= r
+ nb += self.request_size
+ nl -= self.request_size
+ inactive_requests[index] = ll
+ restored_partials.append(index)
+
+ assert amount_obtained + amount_inactive == self.amount_desired
+ except:
+# print_exc()
+ return [] # invalid data, discard everything
+
+ self.have = have
+ self.places = places
+ self.dirty = dirty
+ self.download_history = download_history
+ self.stat_active = stat_active
+ self.stat_numfound = stat_numfound
+ self.amount_obtained = amount_obtained
+ self.amount_inactive = amount_inactive
+ self.amount_left = amount_left
+ self.inactive_requests = inactive_requests
+
+ return restored_partials
+
diff --git a/BitTornado/BT1/StreamCheck.py b/BitTornado/BT1/StreamCheck.py
new file mode 100644
index 000000000..7e57c7ca8
--- /dev/null
+++ b/BitTornado/BT1/StreamCheck.py
@@ -0,0 +1,135 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from binascii import b2a_hex
+from socket import error as socketerror
+from urllib import quote
+from traceback import print_exc
+import Connecter
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+
+protocol_name = 'BitTorrent protocol'
+option_pattern = chr(0)*8
+
+def toint(s):
+ return long(b2a_hex(s), 16)
+
+def tobinary(i):
+ return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
+ chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+hexchars = '0123456789ABCDEF'
+hexmap = []
+for i in xrange(256):
+ hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
+
+def tohex(s):
+ r = []
+ for c in s:
+ r.append(hexmap[ord(c)])
+ return ''.join(r)
+
+def make_readable(s):
+ if not s:
+ return ''
+ if quote(s).find('%') >= 0:
+ return tohex(s)
+ return '"'+s+'"'
+
+# (duplicate definition of toint removed -- identical to the definition above)
+
+
+# header, reserved, download id, my id, [length, message]
+
+streamno = 0
+
+
+class StreamCheck:
+ def __init__(self):
+ global streamno
+ self.no = streamno
+ streamno += 1
+ self.buffer = StringIO()
+ self.next_len, self.next_func = 1, self.read_header_len
+
+ def read_header_len(self, s):
+ if ord(s) != len(protocol_name):
+ print self.no, 'BAD HEADER LENGTH'
+ return len(protocol_name), self.read_header
+
+ def read_header(self, s):
+ if s != protocol_name:
+ print self.no, 'BAD HEADER'
+ return 8, self.read_reserved
+
+ def read_reserved(self, s):
+ return 20, self.read_download_id
+
+ def read_download_id(self, s):
+ if DEBUG:
+ print self.no, 'download ID ' + tohex(s)
+ return 20, self.read_peer_id
+
+ def read_peer_id(self, s):
+ if DEBUG:
+ print self.no, 'peer ID' + make_readable(s)
+ return 4, self.read_len
+
+ def read_len(self, s):
+ l = toint(s)
+ if l > 2 ** 23:
+ print self.no, 'BAD LENGTH: '+str(l)+' ('+s+')'
+ return l, self.read_message
+
+ def read_message(self, s):
+ if not s:
+ return 4, self.read_len
+ m = s[0]
+ if ord(m) > 8:
+ print self.no, 'BAD MESSAGE: '+str(ord(m))
+ if m == Connecter.REQUEST:
+ if len(s) != 13:
+ print self.no, 'BAD REQUEST SIZE: '+str(len(s))
+ return 4, self.read_len
+ index = toint(s[1:5])
+ begin = toint(s[5:9])
+ length = toint(s[9:])
+ print self.no, 'Request: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+ elif m == Connecter.CANCEL:
+ if len(s) != 13:
+ print self.no, 'BAD CANCEL SIZE: '+str(len(s))
+ return 4, self.read_len
+ index = toint(s[1:5])
+ begin = toint(s[5:9])
+ length = toint(s[9:])
+ print self.no, 'Cancel: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+ elif m == Connecter.PIECE:
+ index = toint(s[1:5])
+ begin = toint(s[5:9])
+ length = len(s)-9
+ print self.no, 'Piece: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+ else:
+ print self.no, 'Message '+str(ord(m))+' (length '+str(len(s))+')'
+ return 4, self.read_len
+
+ def write(self, s):
+ while True:
+ i = self.next_len - self.buffer.tell()
+ if i > len(s):
+ self.buffer.write(s)
+ return
+ self.buffer.write(s[:i])
+ s = s[i:]
+ m = self.buffer.getvalue()
+ self.buffer.reset()
+ self.buffer.truncate()
+ x = self.next_func(m)
+ self.next_len, self.next_func = x
diff --git a/BitTornado/BT1/T2T.py b/BitTornado/BT1/T2T.py
new file mode 100644
index 000000000..f4ba2276e
--- /dev/null
+++ b/BitTornado/BT1/T2T.py
@@ -0,0 +1,193 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from Rerequester import Rerequester
+from urllib import quote
+from threading import Event
+from random import randrange
+from string import lower
+import sys
+import __init__
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = True
+
+
+def excfunc(x):
+ print x
+
+class T2TConnection:
+ def __init__(self, myid, tracker, hash, interval, peers, timeout,
+ rawserver, disallow, isdisallowed):
+ self.tracker = tracker
+ self.interval = interval
+ self.hash = hash
+ self.operatinginterval = interval
+ self.peers = peers
+ self.rawserver = rawserver
+ self.disallow = disallow
+ self.isdisallowed = isdisallowed
+ self.active = True
+ self.busy = False
+ self.errors = 0
+ self.rejected = 0
+ self.trackererror = False
+ self.peerlists = []
+
+ self.rerequester = Rerequester([[tracker]], interval,
+ rawserver.add_task, lambda: 0, peers, self.addtolist,
+ rawserver.add_task, lambda: 1, 0, 0, 0, '',
+ myid, hash, timeout, self.errorfunc, excfunc, peers, Event(),
+ lambda: 0, lambda: 0)
+
+ if self.isactive():
+ rawserver.add_task(self.refresh, randrange(int(self.interval/10), self.interval))
+ # stagger announces
+
+ def isactive(self):
+ if self.isdisallowed(self.tracker): # whoops!
+ self.deactivate()
+ return self.active
+
+ def deactivate(self):
+ self.active = False
+
+ def refresh(self):
+ if not self.isactive():
+ return
+ self.lastsuccessful = True
+ self.newpeerdata = []
+ if DEBUG:
+ print 'contacting %s for info_hash=%s' % (self.tracker, quote(self.hash))
+ self.rerequester.snoop(self.peers, self.callback)
+
+ def callback(self):
+ self.busy = False
+ if self.lastsuccessful:
+ self.errors = 0
+ self.rejected = 0
+ if self.rerequester.announce_interval > (3*self.interval):
+ # I think I'm stripping from a regular tracker; boost the number of peers requested
+ self.peers = int(self.peers * (self.rerequester.announce_interval / self.interval))
+ self.operatinginterval = self.rerequester.announce_interval
+ if DEBUG:
+ print ("%s with info_hash=%s returned %d peers" %
+ (self.tracker, quote(self.hash), len(self.newpeerdata)))
+ self.peerlists.append(self.newpeerdata)
+ self.peerlists = self.peerlists[-10:] # keep up to the last 10 announces
+ if self.isactive():
+ self.rawserver.add_task(self.refresh, self.operatinginterval)
+
+ def addtolist(self, peers):
+ for peer in peers:
+ self.newpeerdata.append((peer[1],peer[0][0],peer[0][1]))
+
+ def errorfunc(self, r):
+ self.lastsuccessful = False
+ if DEBUG:
+ print "%s with info_hash=%s gives error: '%s'" % (self.tracker, quote(self.hash), r)
+ if r == self.rerequester.rejectedmessage + 'disallowed': # whoops!
+ if DEBUG:
+ print ' -- disallowed - deactivating'
+ self.deactivate()
+ self.disallow(self.tracker) # signal other torrents on this tracker
+ return
+ if lower(r[:8]) == 'rejected': # tracker rejected this particular torrent
+ self.rejected += 1
+ if self.rejected == 3: # rejected 3 times
+ if DEBUG:
+ print ' -- rejected 3 times - deactivating'
+ self.deactivate()
+ return
+ self.errors += 1
+ if self.errors >= 3: # three or more errors in a row
+ self.operatinginterval += self.interval # lengthen the interval
+ if DEBUG:
+ print ' -- lengthening interval to '+str(self.operatinginterval)+' seconds'
+
+ def harvest(self):
+ x = []
+ for list in self.peerlists:
+ x += list
+ self.peerlists = []
+ return x
+
+
+class T2TList:
+ def __init__(self, enabled, trackerid, interval, maxpeers, timeout, rawserver):
+ self.enabled = enabled
+ self.trackerid = trackerid
+ self.interval = interval
+ self.maxpeers = maxpeers
+ self.timeout = timeout
+ self.rawserver = rawserver
+ self.list = {}
+ self.torrents = {}
+ self.disallowed = {}
+ self.oldtorrents = []
+
+ def parse(self, allowed_list):
+ if not self.enabled:
+ return
+
+        # step 1: Create a new list with all tracker/torrent combinations in allowed_list
+ newlist = {}
+ for hash, data in allowed_list.items():
+ if data.has_key('announce-list'):
+ for tier in data['announce-list']:
+ for tracker in tier:
+ self.disallowed.setdefault(tracker, False)
+ newlist.setdefault(tracker, {})
+ newlist[tracker][hash] = None # placeholder
+
+ # step 2: Go through and copy old data to the new list.
+ # if the new list has no place for it, then it's old, so deactivate it
+ for tracker, hashdata in self.list.items():
+ for hash, t2t in hashdata.items():
+ if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash):
+ t2t.deactivate() # this connection is no longer current
+ self.oldtorrents += [t2t]
+ # keep it referenced in case a thread comes along and tries to access.
+ else:
+ newlist[tracker][hash] = t2t
+ if not newlist.has_key(tracker):
+ self.disallowed[tracker] = False # reset when no torrents on it left
+
+ self.list = newlist
+ newtorrents = {}
+
+ # step 3: If there are any entries that haven't been initialized yet, do so.
+ # At the same time, copy all entries onto the by-torrent list.
+ for tracker, hashdata in newlist.items():
+ for hash, t2t in hashdata.items():
+ if t2t is None:
+ hashdata[hash] = T2TConnection(self.trackerid, tracker, hash,
+ self.interval, self.maxpeers, self.timeout,
+ self.rawserver, self._disallow, self._isdisallowed)
+ newtorrents.setdefault(hash,[])
+ newtorrents[hash] += [hashdata[hash]]
+
+ self.torrents = newtorrents
+
+ # structures:
+ # list = {tracker: {hash: T2TConnection, ...}, ...}
+ # torrents = {hash: [T2TConnection, ...]}
+ # disallowed = {tracker: flag, ...}
+ # oldtorrents = [T2TConnection, ...]
+
+ def _disallow(self,tracker):
+ self.disallowed[tracker] = True
+
+ def _isdisallowed(self,tracker):
+ return self.disallowed[tracker]
+
+ def harvest(self,hash):
+ harvest = []
+ if self.enabled:
+ for t2t in self.torrents[hash]:
+ harvest += t2t.harvest()
+ return harvest
diff --git a/BitTornado/BT1/Uploader.py b/BitTornado/BT1/Uploader.py
new file mode 100644
index 000000000..f9cddb9c7
--- /dev/null
+++ b/BitTornado/BT1/Uploader.py
@@ -0,0 +1,145 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+class Upload:
+ def __init__(self, connection, ratelimiter, totalup, choker, storage,
+ picker, config):
+ self.connection = connection
+ self.ratelimiter = ratelimiter
+ self.totalup = totalup
+ self.choker = choker
+ self.storage = storage
+ self.picker = picker
+ self.config = config
+ self.max_slice_length = config['max_slice_length']
+ self.choked = True
+ self.cleared = True
+ self.interested = False
+ self.super_seeding = False
+ self.buffer = []
+ self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge'])
+ self.was_ever_interested = False
+ if storage.get_amount_left() == 0:
+ if choker.super_seed:
+ self.super_seeding = True # flag, and don't send bitfield
+ self.seed_have_list = [] # set from piecepicker
+ self.skipped_count = 0
+ else:
+ if config['breakup_seed_bitfield']:
+ bitfield, msgs = storage.get_have_list_cloaked()
+ connection.send_bitfield(bitfield)
+ for have in msgs:
+ connection.send_have(have)
+ else:
+ connection.send_bitfield(storage.get_have_list())
+ else:
+ if storage.do_I_have_anything():
+ connection.send_bitfield(storage.get_have_list())
+ self.piecedl = None
+ self.piecebuf = None
+
+ def got_not_interested(self):
+ if self.interested:
+ self.interested = False
+ del self.buffer[:]
+ self.piecedl = None
+ if self.piecebuf:
+ self.piecebuf.release()
+ self.piecebuf = None
+ self.choker.not_interested(self.connection)
+
+ def got_interested(self):
+ if not self.interested:
+ self.interested = True
+ self.was_ever_interested = True
+ self.choker.interested(self.connection)
+
+ def get_upload_chunk(self):
+ if self.choked or not self.buffer:
+ return None
+ index, begin, length = self.buffer.pop(0)
+ if self.config['buffer_reads']:
+ if index != self.piecedl:
+ if self.piecebuf:
+ self.piecebuf.release()
+ self.piecedl = index
+ self.piecebuf = self.storage.get_piece(index, 0, -1)
+ try:
+ piece = self.piecebuf[begin:begin+length]
+ assert len(piece) == length
+ except: # fails if storage.get_piece returns None or if out of range
+ self.connection.close()
+ return None
+ else:
+ if self.piecebuf:
+ self.piecebuf.release()
+ self.piecedl = None
+ piece = self.storage.get_piece(index, begin, length)
+ if piece is None:
+ self.connection.close()
+ return None
+ self.measure.update_rate(len(piece))
+ self.totalup.update_rate(len(piece))
+ return (index, begin, piece)
+
+ def got_request(self, index, begin, length):
+ if ( (self.super_seeding and not index in self.seed_have_list)
+ or not self.interested or length > self.max_slice_length ):
+ self.connection.close()
+ return
+ if not self.cleared:
+ self.buffer.append((index, begin, length))
+ if not self.choked and self.connection.next_upload is None:
+ self.ratelimiter.queue(self.connection)
+
+
+ def got_cancel(self, index, begin, length):
+ try:
+ self.buffer.remove((index, begin, length))
+ except ValueError:
+ pass
+
+ def choke(self):
+ if not self.choked:
+ self.choked = True
+ self.connection.send_choke()
+ self.piecedl = None
+ if self.piecebuf:
+ self.piecebuf.release()
+ self.piecebuf = None
+
+ def choke_sent(self):
+ del self.buffer[:]
+ self.cleared = True
+
+ def unchoke(self):
+ if self.choked:
+ self.choked = False
+ self.cleared = False
+ self.connection.send_unchoke()
+
+ def disconnected(self):
+ if self.piecebuf:
+ self.piecebuf.release()
+ self.piecebuf = None
+
+ def is_choked(self):
+ return self.choked
+
+ def is_interested(self):
+ return self.interested
+
+ def has_queries(self):
+ return not self.choked and len(self.buffer) > 0
+
+ def get_rate(self):
+ return self.measure.get_rate()
+
diff --git a/BitTornado/BT1/__init__.py b/BitTornado/BT1/__init__.py
new file mode 100644
index 000000000..f4572339b
--- /dev/null
+++ b/BitTornado/BT1/__init__.py
@@ -0,0 +1 @@
+# placeholder
\ No newline at end of file
diff --git a/BitTornado/BT1/btformats.py b/BitTornado/BT1/btformats.py
new file mode 100644
index 000000000..9e6089915
--- /dev/null
+++ b/BitTornado/BT1/btformats.py
@@ -0,0 +1,100 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from types import StringType, LongType, IntType, ListType, DictType
+from re import compile
+
+reg = compile(r'^[^/\\.~][^/\\]*$')
+
+ints = (LongType, IntType)
+
+def check_info(info):
+ if type(info) != DictType:
+ raise ValueError, 'bad metainfo - not a dictionary'
+ pieces = info.get('pieces')
+ if type(pieces) != StringType or len(pieces) % 20 != 0:
+ raise ValueError, 'bad metainfo - bad pieces key'
+ piecelength = info.get('piece length')
+ if type(piecelength) not in ints or piecelength <= 0:
+ raise ValueError, 'bad metainfo - illegal piece length'
+ name = info.get('name')
+ if type(name) != StringType:
+ raise ValueError, 'bad metainfo - bad name'
+ if not reg.match(name):
+ raise ValueError, 'name %s disallowed for security reasons' % name
+ if info.has_key('files') == info.has_key('length'):
+ raise ValueError, 'single/multiple file mix'
+ if info.has_key('length'):
+ length = info.get('length')
+ if type(length) not in ints or length < 0:
+ raise ValueError, 'bad metainfo - bad length'
+ else:
+ files = info.get('files')
+ if type(files) != ListType:
+ raise ValueError
+ for f in files:
+ if type(f) != DictType:
+ raise ValueError, 'bad metainfo - bad file value'
+ length = f.get('length')
+ if type(length) not in ints or length < 0:
+ raise ValueError, 'bad metainfo - bad length'
+ path = f.get('path')
+ if type(path) != ListType or path == []:
+ raise ValueError, 'bad metainfo - bad path'
+ for p in path:
+ if type(p) != StringType:
+ raise ValueError, 'bad metainfo - bad path dir'
+ if not reg.match(p):
+ raise ValueError, 'path %s disallowed for security reasons' % p
+ for i in xrange(len(files)):
+ for j in xrange(i):
+ if files[i]['path'] == files[j]['path']:
+ raise ValueError, 'bad metainfo - duplicate path'
+
+def check_message(message):
+ if type(message) != DictType:
+ raise ValueError
+ check_info(message.get('info'))
+ if type(message.get('announce')) != StringType:
+ raise ValueError
+
+def check_peers(message):
+ if type(message) != DictType:
+ raise ValueError
+ if message.has_key('failure reason'):
+ if type(message['failure reason']) != StringType:
+ raise ValueError
+ return
+ peers = message.get('peers')
+ if type(peers) == ListType:
+ for p in peers:
+ if type(p) != DictType:
+ raise ValueError
+ if type(p.get('ip')) != StringType:
+ raise ValueError
+ port = p.get('port')
+            if type(port) not in ints or port <= 0:
+ raise ValueError
+ if p.has_key('peer id'):
+ id = p['peer id']
+ if type(id) != StringType or len(id) != 20:
+ raise ValueError
+ elif type(peers) != StringType or len(peers) % 6 != 0:
+ raise ValueError
+ interval = message.get('interval', 1)
+ if type(interval) not in ints or interval <= 0:
+ raise ValueError
+ minint = message.get('min interval', 1)
+ if type(minint) not in ints or minint <= 0:
+ raise ValueError
+ if type(message.get('tracker id', '')) != StringType:
+ raise ValueError
+ npeers = message.get('num peers', 0)
+ if type(npeers) not in ints or npeers < 0:
+ raise ValueError
+ dpeers = message.get('done peers', 0)
+ if type(dpeers) not in ints or dpeers < 0:
+ raise ValueError
+ last = message.get('last', 0)
+ if type(last) not in ints or last < 0:
+ raise ValueError
diff --git a/BitTornado/BT1/fakeopen.py b/BitTornado/BT1/fakeopen.py
new file mode 100644
index 000000000..e5064922b
--- /dev/null
+++ b/BitTornado/BT1/fakeopen.py
@@ -0,0 +1,89 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from string import join
+
+class FakeHandle:
+ def __init__(self, name, fakeopen):
+ self.name = name
+ self.fakeopen = fakeopen
+ self.pos = 0
+
+ def flush(self):
+ pass
+
+ def close(self):
+ pass
+
+ def seek(self, pos):
+ self.pos = pos
+
+ def read(self, amount = None):
+ old = self.pos
+ f = self.fakeopen.files[self.name]
+ if self.pos >= len(f):
+ return ''
+ if amount is None:
+ self.pos = len(f)
+ return join(f[old:], '')
+ else:
+ self.pos = min(len(f), old + amount)
+ return join(f[old:self.pos], '')
+
+ def write(self, s):
+ f = self.fakeopen.files[self.name]
+ while len(f) < self.pos:
+ f.append(chr(0))
+ self.fakeopen.files[self.name][self.pos : self.pos + len(s)] = list(s)
+ self.pos += len(s)
+
+class FakeOpen:
+ def __init__(self, initial = {}):
+ self.files = {}
+ for key, value in initial.items():
+ self.files[key] = list(value)
+
+ def open(self, filename, mode):
+ """currently treats everything as rw - doesn't support append"""
+ self.files.setdefault(filename, [])
+ return FakeHandle(filename, self)
+
+ def exists(self, file):
+ return self.files.has_key(file)
+
+ def getsize(self, file):
+ return len(self.files[file])
+
+def test_normal():
+ f = FakeOpen({'f1': 'abcde'})
+ assert f.exists('f1')
+ assert not f.exists('f2')
+ assert f.getsize('f1') == 5
+ h = f.open('f1', 'rw')
+ assert h.read(3) == 'abc'
+ assert h.read(1) == 'd'
+ assert h.read() == 'e'
+ assert h.read(2) == ''
+ h.write('fpq')
+ h.seek(4)
+ assert h.read(2) == 'ef'
+ h.write('ghij')
+ h.seek(0)
+ assert h.read() == 'abcdefghij'
+ h.seek(2)
+ h.write('p')
+ h.write('q')
+ assert h.read(1) == 'e'
+ h.seek(1)
+ assert h.read(5) == 'bpqef'
+
+ h2 = f.open('f2', 'rw')
+ assert h2.read() == ''
+ h2.write('mnop')
+ h2.seek(1)
+ assert h2.read() == 'nop'
+
+ assert f.exists('f1')
+ assert f.exists('f2')
+ assert f.getsize('f1') == 10
+ assert f.getsize('f2') == 4
diff --git a/BitTornado/BT1/makemetafile.py b/BitTornado/BT1/makemetafile.py
new file mode 100644
index 000000000..ddb412d25
--- /dev/null
+++ b/BitTornado/BT1/makemetafile.py
@@ -0,0 +1,263 @@
+# Written by Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from os.path import getsize, split, join, abspath, isdir
+from os import listdir
+from sha import sha
+from copy import copy
+from string import strip
+from BitTornado.bencode import bencode
+from btformats import check_info
+from threading import Event
+from time import time
+from traceback import print_exc
+try:
+ from sys import getfilesystemencoding
+ ENCODING = getfilesystemencoding()
+except:
+ from sys import getdefaultencoding
+ ENCODING = getdefaultencoding()
+
+defaults = [
+ ('announce_list', '',
+ 'a list of announce URLs - explained below'),
+ ('httpseeds', '',
+ 'a list of http seed URLs - explained below'),
+ ('piece_size_pow2', 0,
+ "which power of 2 to set the piece size to (0 = automatic)"),
+ ('comment', '',
+ "optional human-readable comment to put in .torrent"),
+ ('filesystem_encoding', '',
+ "optional specification for filesystem encoding " +
+ "(set automatically in recent Python versions)"),
+ ('target', '',
+ "optional target file for the torrent")
+ ]
+
+default_piece_len_exp = 18
+
+ignore = ['core', 'CVS']
+
+def print_announcelist_details():
+ print (' announce_list = optional list of redundant/backup tracker URLs, in the format:')
+ print (' url[,url...][|url[,url...]...]')
+ print (' where URLs separated by commas are all tried first')
+ print (' before the next group of URLs separated by the pipe is checked.')
+ print (" If none is given, it is assumed you don't want one in the metafile.")
+ print (' If announce_list is given, clients which support it')
+ print (' will ignore the value.')
+ print (' Examples:')
+ print (' http://tracker1.com|http://tracker2.com|http://tracker3.com')
+ print (' (tries trackers 1-3 in order)')
+ print (' http://tracker1.com,http://tracker2.com,http://tracker3.com')
+ print (' (tries trackers 1-3 in a randomly selected order)')
+ print (' http://tracker1.com|http://backup1.com,http://backup2.com')
+ print (' (tries tracker 1 first, then tries between the 2 backups randomly)')
+ print ('')
+ print (' httpseeds = optional list of http-seed URLs, in the format:')
+ print (' url[|url...]')
+
+def make_meta_file(file, url, params = {}, flag = Event(),
+ progress = lambda x: None, progress_percent = 1):
+ if params.has_key('piece_size_pow2'):
+ piece_len_exp = params['piece_size_pow2']
+ else:
+ piece_len_exp = default_piece_len_exp
+ if params.has_key('target') and params['target'] != '':
+ f = params['target']
+ else:
+ a, b = split(file)
+ if b == '':
+ f = a + '.torrent'
+ else:
+ f = join(a, b + '.torrent')
+
+ if piece_len_exp == 0: # automatic
+ size = calcsize(file)
+ if size > 8L*1024*1024*1024: # > 8 gig =
+ piece_len_exp = 21 # 2 meg pieces
+ elif size > 2*1024*1024*1024: # > 2 gig =
+ piece_len_exp = 20 # 1 meg pieces
+ elif size > 512*1024*1024: # > 512M =
+ piece_len_exp = 19 # 512K pieces
+ elif size > 64*1024*1024: # > 64M =
+ piece_len_exp = 18 # 256K pieces
+ elif size > 16*1024*1024: # > 16M =
+ piece_len_exp = 17 # 128K pieces
+ elif size > 4*1024*1024: # > 4M =
+ piece_len_exp = 16 # 64K pieces
+ else: # < 4M =
+ piece_len_exp = 15 # 32K pieces
+ piece_length = 2 ** piece_len_exp
+
+ encoding = None
+ if params.has_key('filesystem_encoding'):
+ encoding = params['filesystem_encoding']
+ if not encoding:
+ encoding = ENCODING
+ if not encoding:
+ encoding = 'ascii'
+
+ info = makeinfo(file, piece_length, encoding, flag, progress, progress_percent)
+ if flag.isSet():
+ return
+ check_info(info)
+ h = open(f, 'wb')
+ data = {'info': info, 'announce': strip(url), 'creation date': long(time())}
+
+ if params.has_key('comment') and params['comment']:
+ data['comment'] = params['comment']
+
+ if params.has_key('real_announce_list'): # shortcut for progs calling in from outside
+ data['announce-list'] = params['real_announce_list']
+ elif params.has_key('announce_list') and params['announce_list']:
+ l = []
+ for tier in params['announce_list'].split('|'):
+ l.append(tier.split(','))
+ data['announce-list'] = l
+
+ if params.has_key('real_httpseeds'): # shortcut for progs calling in from outside
+ data['httpseeds'] = params['real_httpseeds']
+ elif params.has_key('httpseeds') and params['httpseeds']:
+ data['httpseeds'] = params['httpseeds'].split('|')
+
+ h.write(bencode(data))
+ h.close()
+
+def calcsize(file):
+ if not isdir(file):
+ return getsize(file)
+ total = 0L
+ for s in subfiles(abspath(file)):
+ total += getsize(s[1])
+ return total
+
+
+def uniconvertl(l, e):
+ r = []
+ try:
+ for s in l:
+ r.append(uniconvert(s, e))
+ except UnicodeError:
+ raise UnicodeError('bad filename: '+join(l))
+ return r
+
+def uniconvert(s, e):
+ try:
+ s = unicode(s,e)
+ except UnicodeError:
+ raise UnicodeError('bad filename: '+s)
+ return s.encode('utf-8')
+
+def makeinfo(file, piece_length, encoding, flag, progress, progress_percent=1):
+ file = abspath(file)
+ if isdir(file):
+ subs = subfiles(file)
+ subs.sort()
+ pieces = []
+ sh = sha()
+ done = 0L
+ fs = []
+ totalsize = 0.0
+ totalhashed = 0L
+ for p, f in subs:
+ totalsize += getsize(f)
+
+ for p, f in subs:
+ pos = 0L
+ size = getsize(f)
+ fs.append({'length': size, 'path': uniconvertl(p, encoding)})
+ h = open(f, 'rb')
+ while pos < size:
+ a = min(size - pos, piece_length - done)
+ sh.update(h.read(a))
+ if flag.isSet():
+ return
+ done += a
+ pos += a
+ totalhashed += a
+
+ if done == piece_length:
+ pieces.append(sh.digest())
+ done = 0
+ sh = sha()
+ if progress_percent:
+ progress(totalhashed / totalsize)
+ else:
+ progress(a)
+ h.close()
+ if done > 0:
+ pieces.append(sh.digest())
+ return {'pieces': ''.join(pieces),
+ 'piece length': piece_length, 'files': fs,
+ 'name': uniconvert(split(file)[1], encoding) }
+ else:
+ size = getsize(file)
+ pieces = []
+ p = 0L
+ h = open(file, 'rb')
+ while p < size:
+ x = h.read(min(piece_length, size - p))
+ if flag.isSet():
+ return
+ pieces.append(sha(x).digest())
+ p += piece_length
+ if p > size:
+ p = size
+ if progress_percent:
+ progress(float(p) / size)
+ else:
+ progress(min(piece_length, size - p))
+ h.close()
+ return {'pieces': ''.join(pieces),
+ 'piece length': piece_length, 'length': size,
+ 'name': uniconvert(split(file)[1], encoding) }
+
+def subfiles(d):
+ r = []
+ stack = [([], d)]
+ while len(stack) > 0:
+ p, n = stack.pop()
+ if isdir(n):
+ for s in listdir(n):
+ if s not in ignore and s[:1] != '.':
+ stack.append((copy(p) + [s], join(n, s)))
+ else:
+ r.append((p, n))
+ return r
+
+
+def completedir(dir, url, params = {}, flag = Event(),
+ vc = lambda x: None, fc = lambda x: None):
+ files = listdir(dir)
+ files.sort()
+ ext = '.torrent'
+ if params.has_key('target'):
+ target = params['target']
+ else:
+ target = ''
+
+ togen = []
+ for f in files:
+ if f[-len(ext):] != ext and (f + ext) not in files:
+ togen.append(join(dir, f))
+
+ total = 0
+ for i in togen:
+ total += calcsize(i)
+
+ subtotal = [0]
+ def callback(x, subtotal = subtotal, total = total, vc = vc):
+ subtotal[0] += x
+ vc(float(subtotal[0]) / total)
+ for i in togen:
+ fc(i)
+ try:
+ t = split(i)[-1]
+ if t not in ignore and t[0] != '.':
+ if target != '':
+ params['target'] = join(target,t+ext)
+ make_meta_file(i, url, params, flag, progress = callback, progress_percent = 0)
+ except ValueError:
+ print_exc()
diff --git a/BitTornado/BT1/track.py b/BitTornado/BT1/track.py
new file mode 100644
index 000000000..625750b8a
--- /dev/null
+++ b/BitTornado/BT1/track.py
@@ -0,0 +1,1067 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.parseargs import parseargs, formatDefinitions
+from BitTornado.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
+from BitTornado.HTTPHandler import HTTPHandler, months, weekdays
+from BitTornado.parsedir import parsedir
+from NatCheck import NatCheck
+from T2T import T2TList
+from BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
+from BitTornado.iprangeparse import IP_List as IP_Range_List
+from BitTornado.torrentlistparse import parsetorrentlist
+from threading import Event, Thread
+from BitTornado.bencode import bencode, bdecode, Bencached
+from BitTornado.zurllib import urlopen, quote, unquote
+from Filter import Filter
+from urlparse import urlparse
+from os import rename, getpid
+from os.path import exists, isfile
+from cStringIO import StringIO
+from traceback import print_exc
+from time import time, gmtime, strftime, localtime
+from BitTornado.clock import clock
+from random import shuffle, seed, randrange
+from sha import sha
+from types import StringType, IntType, LongType, ListType, DictType
+from binascii import b2a_hex, a2b_hex, a2b_base64
+from string import lower
+import sys, os
+import signal
+import re
+import BitTornado.__init__
+from BitTornado.__init__ import version, createPeerID
# Pre-Python-2.2.1 compatibility: define True/False when the runtime lacks them.
try:
    True
except:
    True = 1
    False = 0
+
# (option name, default value, description) triples consumed by
# parseargs()/formatDefinitions() to build the tracker configuration.
defaults = [
    ('port', 80, "Port to listen on."),
    ('dfile', None, 'file to store recent downloader info in'),
    ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
#    ('ipv6_enabled', autodetect_ipv6(),
    ('ipv6_enabled', 0,
         'allow the client to connect to peers via IPv6'),
    ('ipv6_binds_v4', autodetect_socket_style(),
        'set if an IPv6 server socket will also field IPv4 connections'),
    ('socket_timeout', 15, 'timeout for closing connections'),
    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
    ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
    ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
    ('response_size', 50, 'number of peers to send in an info message'),
    ('timeout_check_interval', 5,
        'time to wait between checking if any connections have timed out'),
    ('nat_check', 3,
        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
    ('log_nat_checks', 0,
        "whether to add entries to the log for nat-check results"),
    ('min_time_between_log_flushes', 3.0,
        'minimum time it must have been since the last flush to do another one'),
    ('min_time_between_cache_refreshes', 600.0,
        'minimum time in seconds before a cache is considered stale and is flushed'),
    ('allowed_dir', '', 'only allow downloads for .torrents in this dir'),
    ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
    ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
    ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
    ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
    ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
    ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
    ('aggregate_forward', '', 'format: [,] - if set, forwards all non-multitracker to this url with this optional password'),
    ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker. If enabled, may be 1, or ; ' +
             'if password is set, then an incoming password is required for access'),
    ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
    ('http_timeout', 60,
        'number of seconds to wait before assuming that an http connection has timed out'),
    ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
             'and allowed_ips and banned_ips lists'),
    ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
    ('infopage_redirect', '', 'a URL to redirect the info page to'),
    ('show_names', 1, 'whether to display names from allowed dir'),
    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
    ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
             'file contains subnet data in the format: aa.bb.cc.dd/len'),
    ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
             'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
    ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
             "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
    ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
    ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
    ]
+
def statefiletemplate(x):
    """Validate a bdecoded dfile state dict; raises ValueError if any
    section ('peers', 'completed', 'allowed', 'allowed_dir_files') is
    malformed, so a corrupt statefile is rejected before being used."""
    if type(x) != DictType:
        raise ValueError
    for cname, cinfo in x.items():
        if cname == 'peers':
            for y in cinfo.values():      # The 'peers' key is a dictionary of SHA hashes (torrent ids)
                if type(y) != DictType:   # ... for the active torrents, and each is a dictionary
                    raise ValueError
                for id, info in y.items(): # ... of client ids interested in that torrent
                    if (len(id) != 20):
                        raise ValueError
                    if type(info) != DictType:  # ... each of which is also a dictionary
                        raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
                    if type(info.get('ip', '')) != StringType:
                        raise ValueError
                    port = info.get('port')
                    if type(port) not in (IntType,LongType) or port < 0:
                        raise ValueError
                    left = info.get('left')
                    if type(left) not in (IntType,LongType) or left < 0:
                        raise ValueError
        elif cname == 'completed':
            if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
                raise ValueError          # ... for keeping track of the total completions per torrent
            for y in cinfo.values():      # ... each torrent has an integer value
                if type(y) not in (IntType,LongType):
                    raise ValueError      # ... for the number of reported completions for that torrent
        elif cname == 'allowed':
            if (type(cinfo) != DictType): # a list of info_hashes and included data
                raise ValueError
            if x.has_key('allowed_dir_files'):
                adlist = [z[1] for z in x['allowed_dir_files'].values()]
                for y in cinfo.keys():        # and each should have a corresponding key here
                    if not y in adlist:
                        raise ValueError
        elif cname == 'allowed_dir_files':
            if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
                raise ValueError
            dirkeys = {}
            for y in cinfo.values():      # each entry should have a corresponding info_hash
                if not y[1]:
                    continue
                if not x['allowed'].has_key(y[1]):
                    raise ValueError
                if dirkeys.has_key(y[1]): # and each should have a unique info_hash
                    raise ValueError
                dirkeys[y[1]] = 1
+
+
# Plain-text body returned with 404 responses.
alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'

# Pre-populated subnet list used to recognize intranet/loopback client addresses.
local_IPs = IP_List()
local_IPs.set_intranet_addresses()
+
+
def isotime(secs = None):
    """Return `secs` (seconds since the epoch; defaults to the current
    time) formatted as an ISO-style UTC timestamp, e.g. '2014-01-31 12:00 UTC'."""
    if secs is None:  # identity test; `== None` needlessly invokes __eq__
        secs = time()
    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
+
# Matches the trailing "for <ip>" clause of an HTTP Via: header.
http_via_filter = re.compile(' for ([0-9.]+)\Z')

def _get_forwarded_ip(headers):
    """Extract a candidate client IP from proxy-supplied headers
    (x-forwarded-for, client-ip, via, from), in that priority order.
    The value is not validated here; may return None."""
    header = headers.get('x-forwarded-for')
    if header:
        try:
            # Only handles the two-entry "client, proxy" form; any other
            # comma count falls back to returning the raw header.
            x,y = header.split(',')
        except:
            return header
        if is_valid_ip(x) and not local_IPs.includes(x):
            return x
        return y
    header = headers.get('client-ip')
    if header:
        return header
    header = headers.get('via')
    if header:
        x = http_via_filter.search(header)
        try:
            return x.group(1)   # x may be None when the regex did not match
        except:
            pass
    header = headers.get('from')
    #if header:
    #    return header
    #return None
    return header
+
def get_forwarded_ip(headers):
    """Return the proxy-forwarded client IP from `headers`, or None when it
    is absent, not a valid IP, or a local/intranet address."""
    x = _get_forwarded_ip(headers)
    if not is_valid_ip(x) or local_IPs.includes(x):
        return None
    return x
+
def compact_peer_info(ip, port):
    """Return the 6-byte compact peer encoding of (ip, port):
    four IP octets followed by the port in network byte order.
    Returns '' when ip is not a dotted-quad (i.e. a domain name)."""
    try:
        packed = ''
        for piece in ip.split('.'):
            packed += chr(int(piece))
        packed += chr((port & 0xFF00) >> 8)
        packed += chr(port & 0xFF)
        if len(packed) != 6:
            raise ValueError
    except:
        packed = ''  # not a valid IP, must be a domain name
    return packed
+
+class Tracker:
    def __init__(self, config, rawserver):
        """Build tracker state from `config`, restore any saved peer state
        from the dfile, and schedule the recurring maintenance tasks
        (state saving, peer expiry, cache clock) on `rawserver`."""
        self.config = config
        self.response_size = config['response_size']
        self.dfile = config['dfile']
        self.natcheck = config['nat_check']
        favicon = config['favicon']
        self.parse_dir_interval = config['parse_dir_interval']
        self.favicon = None
        if favicon:
            try:
                h = open(favicon,'r')
                self.favicon = h.read()
                h.close()
            except:
                print "**warning** specified favicon file -- %s -- does not exist." % favicon
        self.rawserver = rawserver
        self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
        self.cached_t = {}  # format: infohash: [time, cache]
        self.times = {}
        self.state = {}
        self.seedcount = {}

        self.allowed_IPs = None
        self.banned_IPs = None
        if config['allowed_ips'] or config['banned_ips']:
            self.allowed_ip_mtime = 0
            self.banned_ip_mtime = 0
            self.read_ip_lists()

        self.only_local_override_ip = config['only_local_override_ip']
        if self.only_local_override_ip == 2:
            self.only_local_override_ip = not config['nat_check']

        # Restore persisted peer/completion state; a corrupt dfile resets it.
        if exists(self.dfile):
            try:
                h = open(self.dfile, 'rb')
                ds = h.read()
                h.close()
                tempstate = bdecode(ds)
                if not tempstate.has_key('peers'):
                    tempstate = {'peers': tempstate}    # old flat-format dfile
                statefiletemplate(tempstate)
                self.state = tempstate
            except:
                print '**warning** statefile '+self.dfile+' corrupt; resetting'
        self.downloads = self.state.setdefault('peers', {})
        self.completed = self.state.setdefault('completed', {})

        # Rebuild seed counts and the bencoded-response caches from the
        # restored peer records, dropping peers now filtered by IP lists.
        self.becache = {}   # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
        for infohash, ds in self.downloads.items():
            self.seedcount[infohash] = 0
            for x,y in ds.items():
                ip = y['ip']
                if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
                     or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
                    del ds[x]
                    continue
                if not y['left']:
                    self.seedcount[infohash] += 1
                if y.get('nat',-1):
                    continue
                # NOTE(review): add_data() stores this value under the key
                # 'given ip' (with a space); this lookup uses 'given_ip' and
                # so appears to always return None -- verify against upstream.
                gip = y.get('given_ip')
                if is_valid_ip(gip) and (
                    not self.only_local_override_ip or local_IPs.includes(ip) ):
                    ip = gip
                self.natcheckOK(infohash,x,ip,y['port'],y['left'])

        for x in self.downloads.keys():
            self.times[x] = {}
            for y in self.downloads[x].keys():
                self.times[x][y] = 0

        self.trackerid = createPeerID('-T-')
        seed(self.trackerid)

        self.reannounce_interval = config['reannounce_interval']
        self.save_dfile_interval = config['save_dfile_interval']
        self.show_names = config['show_names']
        rawserver.add_task(self.save_state, self.save_dfile_interval)
        self.prevtime = clock()
        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
        self.logfile = None
        self.log = None
        # Optional log file; '-' (or empty) keeps logging on stdout.
        if (config['logfile']) and (config['logfile'] != '-'):
            try:
                self.logfile = config['logfile']
                self.log = open(self.logfile,'a')
                sys.stdout = self.log
                print "# Log Started: ", isotime()
            except:
                print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]

        if config['hupmonitor']:
            # Reopen the log on SIGHUP so external log rotation works.
            def huphandler(signum, frame, self = self):
                try:
                    self.log.close ()
                    self.log = open(self.logfile,'a')
                    sys.stdout = self.log
                    print "# Log reopened: ", isotime()
                except:
                    print "**warning** could not reopen logfile"

            signal.signal(signal.SIGHUP, huphandler)

        self.allow_get = config['allow_get']

        self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
                               config['multitracker_reannounce_interval'],
                               config['multitracker_maxpeers'], config['http_timeout'],
                               self.rawserver)

        # allowed_list and allowed_dir are mutually exclusive access-control modes.
        if config['allowed_list']:
            if config['allowed_dir']:
                print '**warning** allowed_dir and allowed_list options cannot be used together'
                print '**warning** disregarding allowed_dir'
                config['allowed_dir'] = ''
            self.allowed = self.state.setdefault('allowed_list',{})
            self.allowed_list_mtime = 0
            self.parse_allowed()
            self.remove_from_state('allowed','allowed_dir_files')
            if config['multitracker_allowed'] == 'autodetect':
                config['multitracker_allowed'] = 'none'
            config['allowed_controls'] = 0

        elif config['allowed_dir']:
            self.allowed = self.state.setdefault('allowed',{})
            self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
            self.allowed_dir_blocked = {}
            self.parse_allowed()
            self.remove_from_state('allowed_list')

        else:
            self.allowed = None
            self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
            if config['multitracker_allowed'] == 'autodetect':
                config['multitracker_allowed'] = 'none'
            config['allowed_controls'] = 0

        self.uq_broken = unquote('+') != ' '
        self.keep_dead = config['keep_dead']
        self.Filter = Filter(rawserver.add_task)

        aggregator = config['aggregator']
        if aggregator == '0':
            self.is_aggregator = False
            self.aggregator_key = None
        else:
            self.is_aggregator = True
            if aggregator == '1':
                self.aggregator_key = None
            else:
                self.aggregator_key = aggregator
            self.natcheck = False   # aggregators never NAT-check

        send = config['aggregate_forward']
        if not send:
            self.aggregate_forward = None
        else:
            try:
                self.aggregate_forward, self.aggregate_password = send.split(',')
            except:
                self.aggregate_forward = send
                self.aggregate_password = None

        self.dedicated_seed_id = config['dedicated_seed_id']
        self.is_seeded = {}

        self.cachetime = 0
        self.cachetimeupdate()
+
    def cachetimeupdate(self):
        """Advance the coarse 1-second cache clock and reschedule itself."""
        self.cachetime += 1     # raw clock, but more efficient for cache
        self.rawserver.add_task(self.cachetimeupdate,1)
+
    def aggregate_senddata(self, query):
        """Forward an announce query string to the configured aggregate
        tracker URL in a background thread (fire-and-forget)."""
        url = self.aggregate_forward+'?'+query
        if self.aggregate_password is not None:
            url += '&password='+self.aggregate_password
        rq = Thread(target = self._aggregate_senddata, args = [url])
        rq.setDaemon(False)
        rq.start()
+
    def _aggregate_senddata(self, url):     # just send, don't attempt to error check,
        """Fetch `url` and discard the response; every error is ignored."""
        try:                                # discard any returned data
            h = urlopen(url)
            h.read()
            h.close()
        except:
            return
+
+
+ def get_infopage(self):
+ try:
+ if not self.config['show_infopage']:
+ return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+ red = self.config['infopage_redirect']
+ if red:
+ return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
+ 'Click Here')
+
+ s = StringIO()
+ s.write('\n' \
+ 'BitTorrent download info\n')
+ if self.favicon is not None:
+ s.write('\n')
+ s.write('\n\n' \
+ '
BitTorrent download info
\n'\
+ '
\n'
+ '
tracker version: %s
\n' \
+ '
server time: %s
\n' \
+ '
\n' % (version, isotime()))
+ if self.config['allowed_dir']:
+ if self.show_names:
+ names = [ (self.allowed[hash]['name'],hash)
+ for hash in self.allowed.keys() ]
+ else:
+ names = [ (None,hash)
+ for hash in self.allowed.keys() ]
+ else:
+ names = [ (None,hash) for hash in self.downloads.keys() ]
+ if not names:
+ s.write('
not tracking any files yet...
\n')
+ else:
+ names.sort()
+ tn = 0
+ tc = 0
+ td = 0
+ tt = 0 # Total transferred
+ ts = 0 # Total size
+ nf = 0 # Number of files displayed
+ if self.config['allowed_dir'] and self.show_names:
+ s.write('
\n' \
+ '
info hash
torrent name
size
complete
downloading
downloaded
transferred
\n')
+ else:
+ s.write('
\n' \
+ '
info hash
complete
downloading
downloaded
\n')
+ for name,hash in names:
+ l = self.downloads[hash]
+ n = self.completed.get(hash, 0)
+ tn = tn + n
+ c = self.seedcount[hash]
+ tc = tc + c
+ d = len(l) - c
+ td = td + d
+ if self.config['allowed_dir'] and self.show_names:
+ if self.allowed.has_key(hash):
+ nf = nf + 1
+ sz = self.allowed[hash]['length'] # size
+ ts = ts + sz
+ szt = sz * n # Transferred for this torrent
+ tt = tt + szt
+ if self.allow_get == 1:
+ linkname = '' + name + ''
+ else:
+ linkname = name
+ s.write('
\n' \
+ % (b2a_hex(hash), c, d, n))
+ ttn = 0
+ for i in self.completed.values():
+ ttn = ttn + i
+ if self.config['allowed_dir'] and self.show_names:
+ s.write('
transferred: torrent size * total downloaded (does not include partial transfers)
\n' \
+ '
\n')
+
+ s.write('\n' \
+ '\n')
+ return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
+ except:
+ print_exc()
+ return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
+
+
+ def scrapedata(self, hash, return_name = True):
+ l = self.downloads[hash]
+ n = self.completed.get(hash, 0)
+ c = self.seedcount[hash]
+ d = len(l) - c
+ f = {'complete': c, 'incomplete': d, 'downloaded': n}
+ if return_name and self.show_names and self.config['allowed_dir']:
+ f['name'] = self.allowed[hash]['name']
+ return (f)
+
    def get_scrape(self, paramslist):
        """Handle a /scrape request: per-hash when 'info_hash' parameters
        were sent, otherwise a full scrape of every known torrent; access
        is gated by the scrape_allowed config option."""
        fs = {}
        if paramslist.has_key('info_hash'):
            if self.config['scrape_allowed'] not in ['specific', 'full']:
                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    'specific scrape function is not available with this tracker.'}))
            for hash in paramslist['info_hash']:
                if self.allowed is not None:
                    if self.allowed.has_key(hash):
                        fs[hash] = self.scrapedata(hash)
                else:
                    if self.downloads.has_key(hash):
                        fs[hash] = self.scrapedata(hash)
        else:
            if self.config['scrape_allowed'] != 'full':
                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    'full scrape function is not available with this tracker.'}))
            if self.allowed is not None:
                keys = self.allowed.keys()
            else:
                keys = self.downloads.keys()
            for hash in keys:
                fs[hash] = self.scrapedata(hash)

        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
+
+
    def get_file(self, hash):
        """Serve the .torrent metafile for `hash` out of the allowed dir;
        only available when the allow_get config option is enabled."""
        if not self.allow_get:
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                'get function is not available with this tracker.')
        if not self.allowed.has_key(hash):
            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
        fname = self.allowed[hash]['file']
        fpath = self.allowed[hash]['path']
        return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
            'Content-Disposition': 'attachment; filename=' + fname},
            open(fpath, 'rb').read())
+
+
    def check_allowed(self, infohash, paramslist):
        """Return a ready-made refusal response tuple when this announce
        must be rejected (aggregator password, allowed list membership,
        allowed_controls failure key, multitracker policy); return None
        when the request may proceed."""
        if ( self.aggregator_key is not None
                and not ( paramslist.has_key('password')
                        and paramslist['password'][0] == self.aggregator_key ) ):
            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'Requested download is not authorized for use with this tracker.'}))

        if self.allowed is not None:
            if not self.allowed.has_key(infohash):
                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    'Requested download is not authorized for use with this tracker.'}))
            if self.config['allowed_controls']:
                if self.allowed[infohash].has_key('failure reason'):
                    return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                        bencode({'failure reason': self.allowed[infohash]['failure reason']}))

        if paramslist.has_key('tracker'):
            if ( self.config['multitracker_allowed'] == 'none' or       # turned off
                        paramslist['peer_id'][0] == self.trackerid ):   # oops! contacted myself
                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason': 'disallowed'}))

            if ( self.config['multitracker_allowed'] == 'autodetect'
                        and not self.allowed[infohash].has_key('announce-list') ):
                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    'Requested download is not authorized for multitracker use.'}))

        return None
+
+
    def add_data(self, infohash, event, ip, paramslist):
        """Validate an announce and fold it into the per-torrent peer
        tables, seed counts and becaches; returns the number of peers to
        include in the response. Raises ValueError on malformed input."""
        peers = self.downloads.setdefault(infohash, {})
        ts = self.times.setdefault(infohash, {})
        self.completed.setdefault(infohash, 0)
        self.seedcount.setdefault(infohash, 0)

        def params(key, default = None, l = paramslist):
            if l.has_key(key):
                return l[key][0]
            return default

        myid = params('peer_id','')
        if len(myid) != 20:
            raise ValueError, 'id not of length 20'
        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
            raise ValueError, 'invalid event'
        port = long(params('port',''))
        if port < 0 or port > 65535:
            raise ValueError, 'invalid port'
        left = long(params('left',''))
        if left < 0:
            raise ValueError, 'invalid amount left'
        # Values unused below, but long('') raises ValueError, so these two
        # lines enforce that uploaded/downloaded are present and numeric.
        uploaded = long(params('uploaded',''))
        downloaded = long(params('downloaded',''))

        peer = peers.get(myid)
        islocal = local_IPs.includes(ip)
        mykey = params('key')
        if peer:
            # A returning peer must prove identity by key or source IP.
            auth = peer.get('key',-1) == mykey or peer.get('ip') == ip

        gip = params('ip')
        if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
            ip1 = gip
        else:
            ip1 = ip

        if params('numwant') is not None:
            rsize = min(int(params('numwant')),self.response_size)
        else:
            rsize = self.response_size

        if event == 'stopped':
            if peer:
                if auth:
                    self.delete_peer(infohash,myid)

        elif not peer:
            # First announce from this peer id: create the record.
            ts[myid] = clock()
            peer = {'ip': ip, 'port': port, 'left': left}
            if mykey:
                peer['key'] = mykey
            if gip:
                peer['given ip'] = gip
            if port:
                if not self.natcheck or islocal:
                    peer['nat'] = 0
                    self.natcheckOK(infohash,myid,ip1,port,left)
                else:
                    NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
            else:
                peer['nat'] = 2**30     # portless peers are never connectable
            if event == 'completed':
                self.completed[infohash] += 1
            if not left:
                self.seedcount[infohash] += 1

            peers[myid] = peer

        else:
            if not auth:
                return rsize    # return w/o changing stats

            ts[myid] = clock()
            # Move the peer between leecher/seed becache buckets when its
            # 'left' count crosses zero in either direction.
            if not left and peer['left']:
                self.completed[infohash] += 1
                self.seedcount[infohash] += 1
                if not peer.get('nat', -1):
                    for bc in self.becache[infohash]:
                        bc[1][myid] = bc[0][myid]
                        del bc[0][myid]
            elif left and not peer['left']:
                self.completed[infohash] -= 1
                self.seedcount[infohash] -= 1
                if not peer.get('nat', -1):
                    for bc in self.becache[infohash]:
                        bc[0][myid] = bc[1][myid]
                        del bc[1][myid]
            peer['left'] = left

            if port:
                # An IP change invalidates the cached entries and NAT status.
                recheck = False
                if ip != peer['ip']:
                    peer['ip'] = ip
                    recheck = True
                if gip != peer.get('given ip'):
                    if gip:
                        peer['given ip'] = gip
                    elif peer.has_key('given ip'):
                        del peer['given ip']
                    recheck = True

                natted = peer.get('nat', -1)
                if recheck:
                    if natted == 0:
                        l = self.becache[infohash]
                        y = not peer['left']
                        for x in l:
                            del x[y][myid]
                    if natted >= 0:
                        del peer['nat'] # restart NAT testing
                if natted and natted < self.natcheck:
                    recheck = True

                if recheck:
                    if not self.natcheck or islocal:
                        peer['nat'] = 0
                        self.natcheckOK(infohash,myid,ip1,port,left)
                    else:
                        NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)

        return rsize
+
+
    def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
        """Assemble the announce response dict: seed/leech totals, the
        reannounce interval, and up to `rsize` peers drawn from the shuffled
        becache snapshots. `return_type` selects the peer encoding
        (0 = full dicts, 1 = no peer id, 2 = compact string)."""
        data = {}    # return data
        seeds = self.seedcount[infohash]
        data['complete'] = seeds
        data['incomplete'] = len(self.downloads[infohash]) - seeds

        if ( self.config['allowed_controls']
                and self.allowed[infohash].has_key('warning message') ):
            data['warning message'] = self.allowed[infohash]['warning message']

        if tracker:
            # Multitracker peer exchange uses its own cache (cached_t).
            data['interval'] = self.config['multitracker_reannounce_interval']
            if not rsize:
                return data
            cache = self.cached_t.setdefault(infohash, None)
            if ( not cache or len(cache[1]) < rsize
                 or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
                bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
                cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
                self.cached_t[infohash] = cache
                shuffle(cache[1])
            cache = cache[1]

            data['peers'] = cache[-rsize:]
            del cache[-rsize:]
            return data

        data['interval'] = self.reannounce_interval
        if stopped or not rsize:     # save some bandwidth
            data['peers'] = []
            return data

        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
        len_l = len(bc[0][0])
        len_s = len(bc[0][1])
        if not (len_l+len_s):   # caches are empty!
            data['peers'] = []
            return data
        # Proportion of the response to fill with leechers vs seeds.
        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
        if cache and ( not cache[1]
                       or (is_seed and len(cache[1]) < rsize)
                       or len(cache[1]) < l_get_size
                       or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
            cache = None
        if not cache:
            # Rebuild this return_type's cache from the becache, folding in
            # any peers harvested from other trackers (t2tlist).
            peers = self.downloads[infohash]
            vv = [[],[],[]]
            for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
                if not peers.has_key(key):
                    vv[0].append({'ip': ip, 'port': port, 'peer id': key})
                    vv[1].append({'ip': ip, 'port': port})
                    vv[2].append(compact_peer_info(ip, port))
            cache = [ self.cachetime,
                      bc[return_type][0].values()+vv[return_type],
                      bc[return_type][1].values() ]
            shuffle(cache[1])
            shuffle(cache[2])
            self.cached[infohash][return_type] = cache
            for rr in xrange(len(self.cached[infohash])):
                if rr != return_type:
                    try:
                        self.cached[infohash][rr][1].extend(vv[rr])
                    except:
                        pass
        if len(cache[1]) < l_get_size:
            # Not enough leechers cached: hand out everything (and seeds
            # too, unless the requester is itself a seed).
            peerdata = cache[1]
            if not is_seed:
                peerdata.extend(cache[2])
            cache[1] = []
            cache[2] = []
        else:
            if not is_seed:
                peerdata = cache[2][l_get_size-rsize:]
                del cache[2][l_get_size-rsize:]
                rsize -= len(peerdata)
            else:
                peerdata = []
            if rsize:
                peerdata.extend(cache[1][-rsize:])
                del cache[1][-rsize:]
        if return_type == 2:
            peerdata = ''.join(peerdata)
        data['peers'] = peerdata
        return data
+
+
    def get(self, connection, path, headers):
        """HTTP dispatch entry point (called by HTTPHandler): resolves the
        client IP (IPv6 mapping, proxy headers, IP allow/ban lists), parses
        the query string, and routes to the info page, /file, /scrape,
        favicon or the main /announce handler. Returns a
        (status, message, headers, body) tuple."""
        real_ip = connection.get_ip()
        ip = real_ip
        if is_ipv4(ip):
            ipv4 = True
        else:
            try:
                ip = ipv6_to_ipv4(ip)
                ipv4 = True
            except ValueError:
                ipv4 = False

        if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
             or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'your IP is not allowed on this tracker'}))

        nip = get_forwarded_ip(headers)
        if nip and not self.only_local_override_ip:
            ip = nip
            try:
                ip = to_ipv4(ip)
                ipv4 = True
            except ValueError:
                ipv4 = False

        paramslist = {}
        def params(key, default = None, l = paramslist):
            if l.has_key(key):
                return l[key][0]
            return default

        try:
            (scheme, netloc, path, pars, query, fragment) = urlparse(path)
            if self.uq_broken == 1:
                path = path.replace('+',' ')
                query = query.replace('+',' ')
            path = unquote(path)[1:]
            for s in query.split('&'):
                if s:
                    i = s.index('=')
                    kw = unquote(s[:i])
                    paramslist.setdefault(kw, [])
                    paramslist[kw] += [unquote(s[i+1:])]

            if path == '' or path == 'index.html':
                return self.get_infopage()
            if (path == 'file'):
                return self.get_file(params('info_hash'))
            if path == 'favicon.ico' and self.favicon is not None:
                return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)

            # automated access from here on

            if path in ('scrape', 'scrape.php', 'tracker.php/scrape'):
                return self.get_scrape(paramslist)

            if not path in ('announce', 'announce.php', 'tracker.php/announce'):
                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)

            # main tracker function

            filtered = self.Filter.check(real_ip, paramslist, headers)
            if filtered:
                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason': filtered}))

            infohash = params('info_hash')
            if not infohash:
                raise ValueError, 'no info hash'

            notallowed = self.check_allowed(infohash, paramslist)
            if notallowed:
                return notallowed

            event = params('event')

            rsize = self.add_data(infohash, event, ip, paramslist)

        except ValueError, e:
            return (400, 'Bad Request', {'Content-Type': 'text/plain'},
                'you sent me garbage - ' + str(e))

        if self.aggregate_forward and not paramslist.has_key('tracker'):
            self.aggregate_senddata(query)

        if self.is_aggregator:      # don't return peer data here
            return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'response': 'OK'}))

        if params('compact') and ipv4:
            return_type = 2
        elif params('no_peer_id'):
            return_type = 1
        else:
            return_type = 0

        data = self.peerlist(infohash, event=='stopped',
                             params('tracker'), not params('left'),
                             return_type, rsize)

        if paramslist.has_key('scrape'):    # deprecated
            data['scrape'] = self.scrapedata(infohash, False)

        if self.dedicated_seed_id:
            if params('seed_id') == self.dedicated_seed_id and params('left') == 0:
                self.is_seeded[infohash] = True
            if params('check_seeded') and self.is_seeded.get(infohash):
                data['seeded'] = 1

        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
+
+
    def natcheckOK(self, infohash, peerid, ip, port, not_seed):
        """Record a connectable peer in all three becache encodings
        (full dict, no-peer-id dict, compact string), bucketed by
        leecher/seed status."""
        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
        bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
                                                         'peer id': peerid}))
        bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
        bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
+
+
    def natchecklog(self, peerid, ip, port, result):
        """Print an Apache-combined-style log line for a NAT-check attempt,
        with `result` in the HTTP status code position."""
        year, month, day, hour, minute, second, a, b, c = localtime(time())
        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
            ip, quote(peerid), day, months[month], year, hour, minute, second,
            ip, port, result)
+
    def connectback_result(self, result, downloadid, peerid, ip, port):
        """NatCheck completion callback: update the peer's 'nat' counter
        (0 = connectable, otherwise number of failed checks) and publish the
        peer via natcheckOK when the connect-back succeeded. Stale results
        for peers that have since changed address/port are discarded."""
        record = self.downloads.get(downloadid, {}).get(peerid)
        if ( record is None
             or (record['ip'] != ip and record.get('given ip') != ip)
             or record['port'] != port ):
            if self.config['log_nat_checks']:
                self.natchecklog(peerid, ip, port, 404)
            return
        if self.config['log_nat_checks']:
            if result:
                x = 200
            else:
                x = 503
            self.natchecklog(peerid, ip, port, x)
        if not record.has_key('nat'):
            record['nat'] = int(not result)
            if result:
                self.natcheckOK(downloadid,peerid,ip,port,record['left'])
        elif result and record['nat']:
            record['nat'] = 0
            self.natcheckOK(downloadid,peerid,ip,port,record['left'])
        elif not result:
            record['nat'] += 1
+
+
+ def remove_from_state(self, *l):
+ for s in l:
+ try:
+ del self.state[s]
+ except:
+ pass
+
+ def save_state(self):
+ self.rawserver.add_task(self.save_state, self.save_dfile_interval)
+ h = open(self.dfile, 'wb')
+ h.write(bencode(self.state))
+ h.close()
+
+
    def parse_allowed(self):
        """Refresh the allowed-torrent set, either by rescanning the
        allowed_dir for .torrent files or by re-reading the allowed_list
        file (skipped when its mtime is unchanged); newly added hashes get
        empty peer/completion/seed entries. Reschedules itself every
        parse_dir_interval seconds."""
        self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)

        if self.config['allowed_dir']:
            r = parsedir( self.config['allowed_dir'], self.allowed,
                          self.allowed_dir_files, self.allowed_dir_blocked,
                          [".torrent"] )
            ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
                added, garbage2 ) = r

            self.state['allowed'] = self.allowed
            self.state['allowed_dir_files'] = self.allowed_dir_files

            self.t2tlist.parse(self.allowed)

        else:
            f = self.config['allowed_list']
            if self.allowed_list_mtime == os.path.getmtime(f):
                return
            try:
                r = parsetorrentlist(f, self.allowed)
                (self.allowed, added, garbage2) = r
                self.state['allowed_list'] = self.allowed
            except (IOError, OSError):
                print '**warning** unable to read allowed torrent list'
                return
            self.allowed_list_mtime = os.path.getmtime(f)

        for infohash in added.keys():
            self.downloads.setdefault(infohash, {})
            self.completed.setdefault(infohash, 0)
            self.seedcount.setdefault(infohash, 0)
+
+
    def read_ip_lists(self):
        """Reload the allowed/banned IP list files when their mtimes have
        changed; reschedules itself every parse_dir_interval seconds."""
        self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)

        f = self.config['allowed_ips']
        if f and self.allowed_ip_mtime != os.path.getmtime(f):
            self.allowed_IPs = IP_List()
            try:
                self.allowed_IPs.read_fieldlist(f)
                self.allowed_ip_mtime = os.path.getmtime(f)
            except (IOError, OSError):
                print '**warning** unable to read allowed_IP list'

        f = self.config['banned_ips']
        if f and self.banned_ip_mtime != os.path.getmtime(f):
            self.banned_IPs = IP_Range_List()
            try:
                self.banned_IPs.read_rangelist(f)
                self.banned_ip_mtime = os.path.getmtime(f)
            except (IOError, OSError):
                print '**warning** unable to read banned_IP list'
+
+
    def delete_peer(self, infohash, peerid):
        """Remove one peer from the download table, the seed count, the
        becaches (when it was published there) and the activity-time table."""
        dls = self.downloads[infohash]
        peer = dls[peerid]
        if not peer['left']:
            self.seedcount[infohash] -= 1
        if not peer.get('nat',-1):  # nat == 0 means the peer is in the becaches
            l = self.becache[infohash]
            y = not peer['left']
            for x in l:
                del x[y][peerid]
        del self.times[infohash][peerid]
        del dls[peerid]
+
    def expire_downloaders(self):
        """Drop every peer that has not announced since the previous sweep,
        then (unless keep_dead is set) forget torrents with no peers left
        that are not pinned by the allowed list. Reschedules itself every
        timeout_downloaders_interval seconds."""
        # Python 2 .items()/.keys() return copies, so deleting during these
        # loops is safe here.
        for x in self.times.keys():
            for myid, t in self.times[x].items():
                if t < self.prevtime:
                    self.delete_peer(x,myid)
        self.prevtime = clock()
        if (self.keep_dead != 1):
            for key, value in self.downloads.items():
                if len(value) == 0 and (
                        self.allowed is None or not self.allowed.has_key(key) ):
                    del self.times[key]
                    del self.downloads[key]
                    del self.seedcount[key]
        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+
+
def track(args):
    """Command-line entry point: parse `args` against `defaults`, start a
    RawServer event loop running a Tracker, and save state on shutdown.
    With no arguments, print the option reference and exit."""
    if len(args) == 0:
        print formatDefinitions(defaults, 80)
        return
    try:
        config, files = parseargs(args, defaults, 0, 0)
    except ValueError, e:
        print 'error: ' + str(e)
        print 'run with no arguments for parameter explanations'
        return
    r = RawServer(Event(), config['timeout_check_interval'],
                  config['socket_timeout'], ipv6_enable = config['ipv6_enabled'])
    t = Tracker(config, r)
    r.bind(config['port'], config['bind'],
           reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
    r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
    t.save_state()
    print '# Shutting down: ' + isotime()
+
def size_format(s):
    """Render a byte count as a short human-readable string using binary
    units: B, KiB, MiB (integer-truncated) and GiB, TiB (two decimals)."""
    # Python-2-only long-literal suffixes (1073741824L etc.) removed: plain
    # int literals compare identically on Python 2 and keep the function
    # importable by Python 3 tooling.
    if (s < 1024):
        r = str(s) + 'B'
    elif (s < 1048576):
        r = str(int(s/1024)) + 'KiB'
    elif (s < 1073741824):
        r = str(int(s/1048576)) + 'MiB'
    elif (s < 1099511627776):
        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
    else:
        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
    return(r)
+
diff --git a/BitTornado/ConfigDir.py b/BitTornado/ConfigDir.py
new file mode 100644
index 000000000..ecd8f581b
--- /dev/null
+++ b/BitTornado/ConfigDir.py
@@ -0,0 +1,385 @@
+#written by John Hoffman
+
+from inifile import ini_write, ini_read
+from bencode import bencode, bdecode
+from types import IntType, LongType, StringType, FloatType
+from CreateIcons import GetIcons, CreateIcon
+from parseargs import defaultargs
+from __init__ import product_name, version_short
+import sys,os
+from time import time, strftime
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+try:
+ realpath = os.path.realpath
+except:
+ realpath = lambda x:x
+OLDICONPATH = os.path.abspath(os.path.dirname(realpath(sys.argv[0])))
+
+DIRNAME = '.'+product_name
+
+hexchars = '0123456789abcdef'
+hexmap = []
+revmap = {}
+for i in xrange(256):
+ x = hexchars[(i&0xF0)/16]+hexchars[i&0x0F]
+ hexmap.append(x)
+ revmap[x] = chr(i)
+
+def tohex(s):
+ r = []
+ for c in s:
+ r.append(hexmap[ord(c)])
+ return ''.join(r)
+
+def unhex(s):
+ r = [ revmap[s[x:x+2]] for x in xrange(0, len(s), 2) ]
+ return ''.join(r)
+
+def copyfile(oldpath, newpath): # simple file copy, all in RAM
+ try:
+ f = open(oldpath,'rb')
+ r = f.read()
+ success = True
+ except:
+ success = False
+ try:
+ f.close()
+ except:
+ pass
+ if not success:
+ return False
+ try:
+ f = open(newpath,'wb')
+ f.write(r)
+ except:
+ success = False
+ try:
+ f.close()
+ except:
+ pass
+ return success
+
+
+class ConfigDir:
+
+ ###### INITIALIZATION TASKS ######
+
+ def __init__(self, dir_root):
+ """
+ Modified by ACR, for Armory-specific download
+ """
+ config_ext = '.armorydl'
+
+ self.dir_root = dir_root
+
+ if not os.path.isdir(self.dir_root):
+ os.mkdir(self.dir_root, 0700) # exception if failed
+
+ self.dir_icons = os.path.join(dir_root,'icons')
+ if not os.path.isdir(self.dir_icons):
+ os.mkdir(self.dir_icons)
+
+ for icon in GetIcons():
+ i = os.path.join(self.dir_icons,icon)
+ if not os.path.exists(i):
+ if not copyfile(os.path.join(OLDICONPATH,icon),i):
+ CreateIcon(icon,self.dir_icons)
+
+ self.dir_torrentcache = os.path.join(dir_root,'torrentcache')
+ if not os.path.isdir(self.dir_torrentcache):
+ os.mkdir(self.dir_torrentcache)
+
+ self.dir_datacache = os.path.join(dir_root,'datacache')
+ if not os.path.isdir(self.dir_datacache):
+ os.mkdir(self.dir_datacache)
+
+ self.dir_piececache = os.path.join(dir_root,'piececache')
+ if not os.path.isdir(self.dir_piececache):
+ os.mkdir(self.dir_piececache)
+
+ self.configfile = os.path.join(dir_root,'config'+config_ext+'.ini')
+ self.statefile = os.path.join(dir_root,'state'+config_ext)
+
+ self.TorrentDataBuffer = {}
+
+
+ ###### CONFIG HANDLING ######
+
+ def setDefaults(self, defaults, ignore=[]):
+ self.config = defaultargs(defaults)
+ for k in ignore:
+ if self.config.has_key(k):
+ del self.config[k]
+
+ def checkConfig(self):
+ return os.path.exists(self.configfile)
+
+ def loadConfig(self):
+ try:
+ r = ini_read(self.configfile)['']
+ except:
+ return self.config
+ l = self.config.keys()
+ for k,v in r.items():
+ if self.config.has_key(k):
+ t = type(self.config[k])
+ try:
+ if t == StringType:
+ self.config[k] = v
+ elif t == IntType or t == LongType:
+ self.config[k] = long(v)
+ elif t == FloatType:
+ self.config[k] = float(v)
+ l.remove(k)
+ except:
+ pass
+ if l: # new default values since last save
+ self.saveConfig()
+ return self.config
+
+ def saveConfig(self, new_config = None):
+ if new_config:
+ for k,v in new_config.items():
+ if self.config.has_key(k):
+ self.config[k] = v
+ try:
+ ini_write( self.configfile, self.config,
+ 'Generated by '+product_name+'/'+version_short+'\n'
+ + strftime('%x %X') )
+ return True
+ except:
+ return False
+
+ def getConfig(self):
+ return self.config
+
+
+ ###### STATE HANDLING ######
+
+ def getState(self):
+ try:
+ f = open(self.statefile,'rb')
+ r = f.read()
+ except:
+ r = None
+ try:
+ f.close()
+ except:
+ pass
+ try:
+ r = bdecode(r)
+ except:
+ r = None
+ return r
+
+ def saveState(self, state):
+ try:
+ f = open(self.statefile,'wb')
+ f.write(bencode(state))
+ success = True
+ except:
+ success = False
+ try:
+ f.close()
+ except:
+ pass
+ return success
+
+
+ ###### TORRENT HANDLING ######
+
+ def getTorrents(self):
+ d = {}
+ for f in os.listdir(self.dir_torrentcache):
+ f = os.path.basename(f)
+ try:
+ f, garbage = f.split('.')
+ except:
+ pass
+ d[unhex(f)] = 1
+ return d.keys()
+
+ def getTorrentVariations(self, t):
+ t = tohex(t)
+ d = []
+ for f in os.listdir(self.dir_torrentcache):
+ f = os.path.basename(f)
+ if f[:len(t)] == t:
+ try:
+ garbage, ver = f.split('.')
+ except:
+ ver = '0'
+ d.append(int(ver))
+ d.sort()
+ return d
+
+    def getTorrent(self, t, v = -1):
+        if v == -1:
+            v = max(self.getTorrentVariations(t)) # potential exception
+        t = tohex(t)
+        if v:
+            t += '.'+str(v)
+ try:
+ f = open(os.path.join(self.dir_torrentcache,t),'rb')
+ r = bdecode(f.read())
+ except:
+ r = None
+ try:
+ f.close()
+ except:
+ pass
+ return r
+
+    def writeTorrent(self, data, t, v = -1):
+        if v == -1:
+            try:
+                v = max(self.getTorrentVariations(t))+1
+            except:
+                v = 0
+        t = tohex(t)
+        if v:
+            t += '.'+str(v)
+ try:
+ f = open(os.path.join(self.dir_torrentcache,t),'wb')
+ f.write(bencode(data))
+ except:
+ v = None
+ try:
+ f.close()
+ except:
+ pass
+ return v
+
+
+ ###### TORRENT DATA HANDLING ######
+
+ def getTorrentData(self, t):
+ if self.TorrentDataBuffer.has_key(t):
+ return self.TorrentDataBuffer[t]
+        p = os.path.join(self.dir_datacache,tohex(t))
+        if not os.path.exists(p):
+            return None
+        try:
+            f = open(p,'rb')
+            r = bdecode(f.read())
+        except:
+            r = None
+        try:
+            f.close()
+        except:
+            pass
+        self.TorrentDataBuffer[t] = r
+ return r
+
+ def writeTorrentData(self, t, data):
+ self.TorrentDataBuffer[t] = data
+ try:
+ f = open(os.path.join(self.dir_datacache,tohex(t)),'wb')
+ f.write(bencode(data))
+ success = True
+ except:
+ success = False
+ try:
+ f.close()
+ except:
+ pass
+ if not success:
+ self.deleteTorrentData(t)
+ return success
+
+ def deleteTorrentData(self, t):
+ try:
+ os.remove(os.path.join(self.dir_datacache,tohex(t)))
+ except:
+ pass
+
+ def getPieceDir(self, t):
+ return os.path.join(self.dir_piececache,tohex(t))
+
+
+ ###### EXPIRATION HANDLING ######
+
+ def deleteOldCacheData(self, days, still_active = [], delete_torrents = False):
+ if not days:
+ return
+ exptime = time() - (days*24*3600)
+ names = {}
+ times = {}
+
+ for f in os.listdir(self.dir_torrentcache):
+ p = os.path.join(self.dir_torrentcache,f)
+ f = os.path.basename(f)
+ try:
+ f, garbage = f.split('.')
+ except:
+ pass
+ try:
+ f = unhex(f)
+ assert len(f) == 20
+ except:
+ continue
+ if delete_torrents:
+ names.setdefault(f,[]).append(p)
+ try:
+ t = os.path.getmtime(p)
+ except:
+ t = time()
+ times.setdefault(f,[]).append(t)
+
+ for f in os.listdir(self.dir_datacache):
+ p = os.path.join(self.dir_datacache,f)
+ try:
+ f = unhex(os.path.basename(f))
+ assert len(f) == 20
+ except:
+ continue
+ names.setdefault(f,[]).append(p)
+ try:
+ t = os.path.getmtime(p)
+ except:
+ t = time()
+ times.setdefault(f,[]).append(t)
+
+ for f in os.listdir(self.dir_piececache):
+ p = os.path.join(self.dir_piececache,f)
+ try:
+ f = unhex(os.path.basename(f))
+ assert len(f) == 20
+ except:
+ continue
+ for f2 in os.listdir(p):
+ p2 = os.path.join(p,f2)
+ names.setdefault(f,[]).append(p2)
+ try:
+ t = os.path.getmtime(p2)
+ except:
+ t = time()
+ times.setdefault(f,[]).append(t)
+ names.setdefault(f,[]).append(p)
+
+ for k,v in times.items():
+ if max(v) < exptime and not k in still_active:
+ for f in names[k]:
+ try:
+ os.remove(f)
+ except:
+ try:
+ os.removedirs(f)
+ except:
+ pass
+
+
+ def deleteOldTorrents(self, days, still_active = []):
+ self.deleteOldCacheData(days, still_active, True)
+
+
+ ###### OTHER ######
+
+ def getIconDir(self):
+ return self.dir_icons
diff --git a/BitTornado/ConfigReader.py b/BitTornado/ConfigReader.py
new file mode 100644
index 000000000..ae3f38f78
--- /dev/null
+++ b/BitTornado/ConfigReader.py
@@ -0,0 +1,1068 @@
+#written by John Hoffman
+
+from ConnChoice import *
+from wxPython.wx import *
+from types import IntType, FloatType, StringType
+from download_bt1 import defaults
+from ConfigDir import ConfigDir
+import sys,os
+import socket
+from parseargs import defaultargs
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+try:
+ wxFULL_REPAINT_ON_RESIZE
+except:
+ wxFULL_REPAINT_ON_RESIZE = 0 # fix for wx pre-2.5
+
+if (sys.platform == 'win32'):
+ _FONT = 9
+else:
+ _FONT = 10
+
+def HexToColor(s):
+ r,g,b = s.split(' ')
+ return wxColour(red=int(r,16), green=int(g,16), blue=int(b,16))
+
+def hex2(c):
+ h = hex(c)[2:]
+ if len(h) == 1:
+ h = '0'+h
+ return h
+def ColorToHex(c):
+ return hex2(c.Red()) + ' ' + hex2(c.Green()) + ' ' + hex2(c.Blue())
+
+ratesettingslist = []
+for x in connChoices:
+ if not x.has_key('super-seed'):
+ ratesettingslist.append(x['name'])
+
+
+configFileDefaults = [
+ #args only available for the gui client
+ ('win32_taskbar_icon', 1,
+    "whether to iconize to system tray or not on win32"),
+ ('gui_stretchwindow', 0,
+ "whether to stretch the download status window to fit the torrent name"),
+ ('gui_displaystats', 1,
+ "whether to display statistics on peers and seeds"),
+ ('gui_displaymiscstats', 1,
+ "whether to display miscellaneous other statistics"),
+ ('gui_ratesettingsdefault', ratesettingslist[0],
+ "the default setting for maximum upload rate and users"),
+ ('gui_ratesettingsmode', 'full',
+ "what rate setting controls to display; options are 'none', 'basic', and 'full'"),
+ ('gui_forcegreenonfirewall', 0,
+ "forces the status icon to be green even if the client seems to be firewalled"),
+ ('gui_default_savedir', '',
+ "default save directory"),
+ ('last_saved', '', # hidden; not set in config
+ "where the last torrent was saved"),
+ ('gui_font', _FONT,
+ "the font size to use"),
+ ('gui_saveas_ask', -1,
+    "whether to ask where to download to (0 = never, 1 = always, -1 = automatic resume)"),
+]
+
+def setwxconfigfiledefaults():
+ CHECKINGCOLOR = ColorToHex(wxSystemSettings_GetColour(wxSYS_COLOUR_3DSHADOW))
+ DOWNLOADCOLOR = ColorToHex(wxSystemSettings_GetColour(wxSYS_COLOUR_ACTIVECAPTION))
+
+ configFileDefaults.extend([
+ ('gui_checkingcolor', CHECKINGCOLOR,
+ "progress bar checking color"),
+ ('gui_downloadcolor', DOWNLOADCOLOR,
+ "progress bar downloading color"),
+ ('gui_seedingcolor', '00 FF 00',
+ "progress bar seeding color"),
+ ])
+
+defaultsToIgnore = ['responsefile', 'url', 'priority']
+
+
+class configReader:
+
+ def __init__(self):
+ self.configfile = wxConfig("BitTorrent",style=wxCONFIG_USE_LOCAL_FILE)
+ self.configMenuBox = None
+ self.advancedMenuBox = None
+ self._configReset = True # run reset for the first time
+
+ setwxconfigfiledefaults()
+
+ defaults.extend(configFileDefaults)
+ self.defaults = defaultargs(defaults)
+
+ self.configDir = ConfigDir('gui')
+ self.configDir.setDefaults(defaults,defaultsToIgnore)
+ if self.configDir.checkConfig():
+ self.config = self.configDir.loadConfig()
+ else:
+ self.config = self.configDir.getConfig()
+ self.importOldGUIConfig()
+ self.configDir.saveConfig()
+
+ updated = False # make all config default changes here
+
+ if self.config['gui_ratesettingsdefault'] not in ratesettingslist:
+ self.config['gui_ratesettingsdefault'] = (
+ self.defaults['gui_ratesettingsdefault'] )
+ updated = True
+ if self.config['ipv6_enabled'] and (
+ sys.version_info < (2,3) or not socket.has_ipv6 ):
+ self.config['ipv6_enabled'] = 0
+ updated = True
+ for c in ['gui_checkingcolor','gui_downloadcolor','gui_seedingcolor']:
+ try:
+ HexToColor(self.config[c])
+ except:
+ self.config[c] = self.defaults[c]
+ updated = True
+
+ if updated:
+ self.configDir.saveConfig()
+
+ self.configDir.deleteOldCacheData(self.config['expire_cache_data'])
+
+
+ def importOldGUIConfig(self):
+ oldconfig = wxConfig("BitTorrent",style=wxCONFIG_USE_LOCAL_FILE)
+ cont, s, i = oldconfig.GetFirstEntry()
+ if not cont:
+ oldconfig.DeleteAll()
+ return False
+ while cont: # import old config data
+ if self.config.has_key(s):
+ t = oldconfig.GetEntryType(s)
+ try:
+ if t == 1:
+ assert type(self.config[s]) == type('')
+ self.config[s] = oldconfig.Read(s)
+ elif t == 2 or t == 3:
+ assert type(self.config[s]) == type(1)
+ self.config[s] = int(oldconfig.ReadInt(s))
+ elif t == 4:
+ assert type(self.config[s]) == type(1.0)
+ self.config[s] = oldconfig.ReadFloat(s)
+ except:
+ pass
+ cont, s, i = oldconfig.GetNextEntry(i)
+
+# oldconfig.DeleteAll()
+ return True
+
+
+ def resetConfigDefaults(self):
+ for p,v in self.defaults.items():
+ if not p in defaultsToIgnore:
+ self.config[p] = v
+ self.configDir.saveConfig()
+
+ def writeConfigFile(self):
+ self.configDir.saveConfig()
+
+ def WriteLastSaved(self, l):
+ self.config['last_saved'] = l
+ self.configDir.saveConfig()
+
+
+ def getcheckingcolor(self):
+ return HexToColor(self.config['gui_checkingcolor'])
+ def getdownloadcolor(self):
+ return HexToColor(self.config['gui_downloadcolor'])
+ def getseedingcolor(self):
+ return HexToColor(self.config['gui_seedingcolor'])
+
+ def configReset(self):
+ r = self._configReset
+ self._configReset = False
+ return r
+
+ def getConfigDir(self):
+ return self.configDir
+
+ def getIconDir(self):
+ return self.configDir.getIconDir()
+
+ def getTorrentData(self,t):
+ return self.configDir.getTorrentData(t)
+
+ def setColorIcon(self, xxicon, xxiconptr, xxcolor):
+ idata = wxMemoryDC()
+ idata.SelectObject(xxicon)
+ idata.SetBrush(wxBrush(xxcolor,wxSOLID))
+ idata.DrawRectangle(0,0,16,16)
+ idata.SelectObject(wxNullBitmap)
+ xxiconptr.Refresh()
+
+
+ def getColorFromUser(self, parent, colInit):
+ data = wxColourData()
+ if colInit.Ok():
+ data.SetColour(colInit)
+ data.SetCustomColour(0, self.checkingcolor)
+ data.SetCustomColour(1, self.downloadcolor)
+ data.SetCustomColour(2, self.seedingcolor)
+ dlg = wxColourDialog(parent,data)
+ if not dlg.ShowModal():
+ return colInit
+ return dlg.GetColourData().GetColour()
+
+
+ def configMenu(self, parent):
+ self.parent = parent
+ try:
+ self.FONT = self.config['gui_font']
+ self.default_font = wxFont(self.FONT, wxDEFAULT, wxNORMAL, wxNORMAL, False)
+ self.checkingcolor = HexToColor(self.config['gui_checkingcolor'])
+ self.downloadcolor = HexToColor(self.config['gui_downloadcolor'])
+ self.seedingcolor = HexToColor(self.config['gui_seedingcolor'])
+
+ if (self.configMenuBox is not None):
+ try:
+ self.configMenuBox.Close()
+ except wxPyDeadObjectError, e:
+ self.configMenuBox = None
+
+ self.configMenuBox = wxFrame(None, -1, 'BitTorrent Preferences', size = (1,1),
+ style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+ if (sys.platform == 'win32'):
+ self.icon = self.parent.icon
+ self.configMenuBox.SetIcon(self.icon)
+
+ panel = wxPanel(self.configMenuBox, -1)
+ self.panel = panel
+
+ def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+ x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+ x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+ if color is not None:
+ x.SetForegroundColour(color)
+ return x
+
+ colsizer = wxFlexGridSizer(cols = 1, vgap = 8)
+
+ self.gui_stretchwindow_checkbox = wxCheckBox(panel, -1, "Stretch window to fit torrent name *")
+ self.gui_stretchwindow_checkbox.SetFont(self.default_font)
+ self.gui_stretchwindow_checkbox.SetValue(self.config['gui_stretchwindow'])
+
+ self.gui_displaystats_checkbox = wxCheckBox(panel, -1, "Display peer and seed statistics")
+ self.gui_displaystats_checkbox.SetFont(self.default_font)
+ self.gui_displaystats_checkbox.SetValue(self.config['gui_displaystats'])
+
+ self.gui_displaymiscstats_checkbox = wxCheckBox(panel, -1, "Display miscellaneous other statistics")
+ self.gui_displaymiscstats_checkbox.SetFont(self.default_font)
+ self.gui_displaymiscstats_checkbox.SetValue(self.config['gui_displaymiscstats'])
+
+ self.security_checkbox = wxCheckBox(panel, -1, "Don't allow multiple connections from the same IP")
+ self.security_checkbox.SetFont(self.default_font)
+ self.security_checkbox.SetValue(self.config['security'])
+
+ self.autokick_checkbox = wxCheckBox(panel, -1, "Kick/ban clients that send you bad data *")
+ self.autokick_checkbox.SetFont(self.default_font)
+ self.autokick_checkbox.SetValue(self.config['auto_kick'])
+
+ self.buffering_checkbox = wxCheckBox(panel, -1, "Enable read/write buffering *")
+ self.buffering_checkbox.SetFont(self.default_font)
+ self.buffering_checkbox.SetValue(self.config['buffer_reads'])
+
+ self.breakup_checkbox = wxCheckBox(panel, -1, "Break-up seed bitfield to foil ISP manipulation")
+ self.breakup_checkbox.SetFont(self.default_font)
+ self.breakup_checkbox.SetValue(self.config['breakup_seed_bitfield'])
+
+ self.autoflush_checkbox = wxCheckBox(panel, -1, "Flush data to disk every 5 minutes")
+ self.autoflush_checkbox.SetFont(self.default_font)
+ self.autoflush_checkbox.SetValue(self.config['auto_flush'])
+
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.ipv6enabled_checkbox = wxCheckBox(panel, -1, "Initiate and receive connections via IPv6 *")
+ self.ipv6enabled_checkbox.SetFont(self.default_font)
+ self.ipv6enabled_checkbox.SetValue(self.config['ipv6_enabled'])
+
+ self.gui_forcegreenonfirewall_checkbox = wxCheckBox(panel, -1,
+ "Force icon to display green when firewalled")
+ self.gui_forcegreenonfirewall_checkbox.SetFont(self.default_font)
+ self.gui_forcegreenonfirewall_checkbox.SetValue(self.config['gui_forcegreenonfirewall'])
+
+
+ self.minport_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*8, -1))
+ self.minport_data.SetFont(self.default_font)
+ self.minport_data.SetRange(1,65535)
+ self.minport_data.SetValue(self.config['minport'])
+
+ self.maxport_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*8, -1))
+ self.maxport_data.SetFont(self.default_font)
+ self.maxport_data.SetRange(1,65535)
+ self.maxport_data.SetValue(self.config['maxport'])
+
+ self.randomport_checkbox = wxCheckBox(panel, -1, "randomize")
+ self.randomport_checkbox.SetFont(self.default_font)
+ self.randomport_checkbox.SetValue(self.config['random_port'])
+
+ self.gui_font_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*5, -1))
+ self.gui_font_data.SetFont(self.default_font)
+ self.gui_font_data.SetRange(8,16)
+ self.gui_font_data.SetValue(self.config['gui_font'])
+
+ self.gui_ratesettingsdefault_data=wxChoice(panel, -1, choices = ratesettingslist)
+ self.gui_ratesettingsdefault_data.SetFont(self.default_font)
+ self.gui_ratesettingsdefault_data.SetStringSelection(self.config['gui_ratesettingsdefault'])
+
+ self.maxdownload_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+ self.maxdownload_data.SetFont(self.default_font)
+ self.maxdownload_data.SetRange(0,5000)
+ self.maxdownload_data.SetValue(self.config['max_download_rate'])
+
+ self.gui_ratesettingsmode_data=wxRadioBox(panel, -1, 'Rate Settings Mode',
+ choices = [ 'none', 'basic', 'full' ] )
+ self.gui_ratesettingsmode_data.SetFont(self.default_font)
+ self.gui_ratesettingsmode_data.SetStringSelection(self.config['gui_ratesettingsmode'])
+
+ if (sys.platform == 'win32'):
+ self.win32_taskbar_icon_checkbox = wxCheckBox(panel, -1, "Minimize to system tray")
+ self.win32_taskbar_icon_checkbox.SetFont(self.default_font)
+ self.win32_taskbar_icon_checkbox.SetValue(self.config['win32_taskbar_icon'])
+
+# self.upnp_checkbox = wxCheckBox(panel, -1, "Enable automatic UPnP port forwarding")
+# self.upnp_checkbox.SetFont(self.default_font)
+# self.upnp_checkbox.SetValue(self.config['upnp_nat_access'])
+ self.upnp_data=wxChoice(panel, -1,
+ choices = ['disabled', 'type 1 (fast)', 'type 2 (slow)'])
+ self.upnp_data.SetFont(self.default_font)
+ self.upnp_data.SetSelection(self.config['upnp_nat_access'])
+
+ self.gui_default_savedir_ctrl = wxTextCtrl(parent = panel, id = -1,
+ value = self.config['gui_default_savedir'],
+ size = (26*self.FONT, -1), style = wxTE_PROCESS_TAB)
+ self.gui_default_savedir_ctrl.SetFont(self.default_font)
+
+ self.gui_savemode_data=wxRadioBox(panel, -1, 'Ask where to save: *',
+ choices = [ 'always', 'never', 'auto-resume' ] )
+ self.gui_savemode_data.SetFont(self.default_font)
+ self.gui_savemode_data.SetSelection(1-self.config['gui_saveas_ask'])
+
+ self.checkingcolor_icon = wxEmptyBitmap(16,16)
+ self.checkingcolor_iconptr = wxStaticBitmap(panel, -1, self.checkingcolor_icon)
+ self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, self.checkingcolor)
+
+ self.downloadcolor_icon = wxEmptyBitmap(16,16)
+ self.downloadcolor_iconptr = wxStaticBitmap(panel, -1, self.downloadcolor_icon)
+ self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, self.downloadcolor)
+
+ self.seedingcolor_icon = wxEmptyBitmap(16,16)
+ self.seedingcolor_iconptr = wxStaticBitmap(panel, -1, self.seedingcolor_icon)
+                self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, self.seedingcolor)
+
+ rowsizer = wxFlexGridSizer(cols = 2, hgap = 20)
+
+ block12sizer = wxFlexGridSizer(cols = 1, vgap = 7)
+
+ block1sizer = wxFlexGridSizer(cols = 1, vgap = 2)
+ if (sys.platform == 'win32'):
+ block1sizer.Add(self.win32_taskbar_icon_checkbox)
+# block1sizer.Add(self.upnp_checkbox)
+ block1sizer.Add(self.gui_stretchwindow_checkbox)
+ block1sizer.Add(self.gui_displaystats_checkbox)
+ block1sizer.Add(self.gui_displaymiscstats_checkbox)
+ block1sizer.Add(self.security_checkbox)
+ block1sizer.Add(self.autokick_checkbox)
+ block1sizer.Add(self.buffering_checkbox)
+ block1sizer.Add(self.breakup_checkbox)
+ block1sizer.Add(self.autoflush_checkbox)
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ block1sizer.Add(self.ipv6enabled_checkbox)
+ block1sizer.Add(self.gui_forcegreenonfirewall_checkbox)
+
+ block12sizer.Add(block1sizer)
+
+ colorsizer = wxStaticBoxSizer(wxStaticBox(panel, -1, "Gauge Colors:"), wxVERTICAL)
+ colorsizer1 = wxFlexGridSizer(cols = 7)
+ colorsizer1.Add(StaticText(' Checking: '), 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(self.checkingcolor_iconptr, 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(StaticText(' Downloading: '), 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(self.downloadcolor_iconptr, 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(StaticText(' Seeding: '), 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(self.seedingcolor_iconptr, 1, wxALIGN_BOTTOM)
+ colorsizer1.Add(StaticText(' '))
+ minsize = self.checkingcolor_iconptr.GetBestSize()
+ minsize.SetHeight(minsize.GetHeight()+5)
+ colorsizer1.SetMinSize(minsize)
+ colorsizer.Add(colorsizer1)
+
+ block12sizer.Add(colorsizer, 1, wxALIGN_LEFT)
+
+ rowsizer.Add(block12sizer)
+
+ block3sizer = wxFlexGridSizer(cols = 1)
+
+ portsettingsSizer = wxStaticBoxSizer(wxStaticBox(panel, -1, "Port Range:*"), wxVERTICAL)
+ portsettingsSizer1 = wxGridSizer(cols = 2, vgap = 1)
+ portsettingsSizer1.Add(StaticText('From: '), 1, wxALIGN_CENTER_VERTICAL|wxALIGN_RIGHT)
+ portsettingsSizer1.Add(self.minport_data, 1, wxALIGN_BOTTOM)
+ portsettingsSizer1.Add(StaticText('To: '), 1, wxALIGN_CENTER_VERTICAL|wxALIGN_RIGHT)
+ portsettingsSizer1.Add(self.maxport_data, 1, wxALIGN_BOTTOM)
+ portsettingsSizer.Add(portsettingsSizer1)
+ portsettingsSizer.Add(self.randomport_checkbox, 1, wxALIGN_CENTER)
+ block3sizer.Add(portsettingsSizer, 1, wxALIGN_CENTER)
+ block3sizer.Add(StaticText(' '))
+ block3sizer.Add(self.gui_ratesettingsmode_data, 1, wxALIGN_CENTER)
+ block3sizer.Add(StaticText(' '))
+ ratesettingsSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+ ratesettingsSizer.Add(StaticText('Default Rate Setting: *'), 1, wxALIGN_CENTER)
+ ratesettingsSizer.Add(self.gui_ratesettingsdefault_data, 1, wxALIGN_CENTER)
+ block3sizer.Add(ratesettingsSizer, 1, wxALIGN_CENTER)
+ if (sys.platform == 'win32'):
+ block3sizer.Add(StaticText(' '))
+ upnpSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+ upnpSizer.Add(StaticText('UPnP Port Forwarding: *'), 1, wxALIGN_CENTER)
+ upnpSizer.Add(self.upnp_data, 1, wxALIGN_CENTER)
+ block3sizer.Add(upnpSizer, 1, wxALIGN_CENTER)
+
+ rowsizer.Add(block3sizer)
+ colsizer.Add(rowsizer)
+
+ block4sizer = wxFlexGridSizer(cols = 3, hgap = 15)
+ savepathsizer = wxFlexGridSizer(cols = 2, vgap = 1)
+ savepathsizer.Add(StaticText('Default Save Path: *'))
+ savepathsizer.Add(StaticText(' '))
+ savepathsizer.Add(self.gui_default_savedir_ctrl, 1, wxEXPAND)
+ savepathButton = wxButton(panel, -1, '...', size = (18,18))
+# savepathButton.SetFont(self.default_font)
+ savepathsizer.Add(savepathButton, 0, wxALIGN_CENTER)
+ savepathsizer.Add(self.gui_savemode_data, 0, wxALIGN_CENTER)
+ block4sizer.Add(savepathsizer, -1, wxALIGN_BOTTOM)
+
+ fontsizer = wxFlexGridSizer(cols = 1, vgap = 2)
+ fontsizer.Add(StaticText(''))
+ fontsizer.Add(StaticText('Font: *'), 1, wxALIGN_CENTER)
+ fontsizer.Add(self.gui_font_data, 1, wxALIGN_CENTER)
+ block4sizer.Add(fontsizer, 1, wxALIGN_CENTER_VERTICAL)
+
+ dratesettingsSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+ dratesettingsSizer.Add(StaticText('Default Max'), 1, wxALIGN_CENTER)
+ dratesettingsSizer.Add(StaticText('Download Rate'), 1, wxALIGN_CENTER)
+ dratesettingsSizer.Add(StaticText('(kB/s): *'), 1, wxALIGN_CENTER)
+ dratesettingsSizer.Add(self.maxdownload_data, 1, wxALIGN_CENTER)
+ dratesettingsSizer.Add(StaticText('(0 = disabled)'), 1, wxALIGN_CENTER)
+
+ block4sizer.Add(dratesettingsSizer, 1, wxALIGN_CENTER_VERTICAL)
+
+ colsizer.Add(block4sizer, 0, wxALIGN_CENTER)
+# colsizer.Add(StaticText(' '))
+
+ savesizer = wxGridSizer(cols = 4, hgap = 10)
+ saveButton = wxButton(panel, -1, 'Save')
+# saveButton.SetFont(self.default_font)
+ savesizer.Add(saveButton, 0, wxALIGN_CENTER)
+
+ cancelButton = wxButton(panel, -1, 'Cancel')
+# cancelButton.SetFont(self.default_font)
+ savesizer.Add(cancelButton, 0, wxALIGN_CENTER)
+
+ defaultsButton = wxButton(panel, -1, 'Revert to Defaults')
+# defaultsButton.SetFont(self.default_font)
+ savesizer.Add(defaultsButton, 0, wxALIGN_CENTER)
+
+ advancedButton = wxButton(panel, -1, 'Advanced...')
+# advancedButton.SetFont(self.default_font)
+ savesizer.Add(advancedButton, 0, wxALIGN_CENTER)
+ colsizer.Add(savesizer, 1, wxALIGN_CENTER)
+
+ resizewarningtext=StaticText('* These settings will not take effect until the next time you start BitTorrent', self.FONT-2)
+ colsizer.Add(resizewarningtext, 1, wxALIGN_CENTER)
+
+ border = wxBoxSizer(wxHORIZONTAL)
+ border.Add(colsizer, 1, wxEXPAND | wxALL, 4)
+
+ panel.SetSizer(border)
+ panel.SetAutoLayout(True)
+
+ self.advancedConfig = {}
+
+ def setDefaults(evt, self = self):
+ try:
+ self.minport_data.SetValue(self.defaults['minport'])
+ self.maxport_data.SetValue(self.defaults['maxport'])
+ self.randomport_checkbox.SetValue(self.defaults['random_port'])
+ self.gui_stretchwindow_checkbox.SetValue(self.defaults['gui_stretchwindow'])
+ self.gui_displaystats_checkbox.SetValue(self.defaults['gui_displaystats'])
+ self.gui_displaymiscstats_checkbox.SetValue(self.defaults['gui_displaymiscstats'])
+ self.security_checkbox.SetValue(self.defaults['security'])
+ self.autokick_checkbox.SetValue(self.defaults['auto_kick'])
+ self.buffering_checkbox.SetValue(self.defaults['buffer_reads'])
+ self.breakup_checkbox.SetValue(self.defaults['breakup_seed_bitfield'])
+ self.autoflush_checkbox.SetValue(self.defaults['auto_flush'])
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.ipv6enabled_checkbox.SetValue(self.defaults['ipv6_enabled'])
+ self.gui_forcegreenonfirewall_checkbox.SetValue(self.defaults['gui_forcegreenonfirewall'])
+ self.gui_font_data.SetValue(self.defaults['gui_font'])
+ self.gui_ratesettingsdefault_data.SetStringSelection(self.defaults['gui_ratesettingsdefault'])
+ self.maxdownload_data.SetValue(self.defaults['max_download_rate'])
+ self.gui_ratesettingsmode_data.SetStringSelection(self.defaults['gui_ratesettingsmode'])
+ self.gui_default_savedir_ctrl.SetValue(self.defaults['gui_default_savedir'])
+ self.gui_savemode_data.SetSelection(1-self.defaults['gui_saveas_ask'])
+
+ self.checkingcolor = HexToColor(self.defaults['gui_checkingcolor'])
+ self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, self.checkingcolor)
+ self.downloadcolor = HexToColor(self.defaults['gui_downloadcolor'])
+ self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, self.downloadcolor)
+ self.seedingcolor = HexToColor(self.defaults['gui_seedingcolor'])
+ self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, self.seedingcolor)
+
+ if (sys.platform == 'win32'):
+ self.win32_taskbar_icon_checkbox.SetValue(self.defaults['win32_taskbar_icon'])
+# self.upnp_checkbox.SetValue(self.defaults['upnp_nat_access'])
+ self.upnp_data.SetSelection(self.defaults['upnp_nat_access'])
+
+ # reset advanced too
+ self.advancedConfig = {}
+ for key in ['ip', 'bind', 'min_peers', 'max_initiate', 'display_interval',
+ 'alloc_type', 'alloc_rate', 'max_files_open', 'max_connections', 'super_seeder',
+ 'ipv6_binds_v4', 'double_check', 'triple_check', 'lock_files', 'lock_while_reading',
+ 'expire_cache_data']:
+ self.advancedConfig[key] = self.defaults[key]
+ self.CloseAdvanced()
+ except:
+ self.parent.exception()
+
+
+ def saveConfigs(evt, self = self):
+ try:
+ self.config['gui_stretchwindow']=int(self.gui_stretchwindow_checkbox.GetValue())
+ self.config['gui_displaystats']=int(self.gui_displaystats_checkbox.GetValue())
+ self.config['gui_displaymiscstats']=int(self.gui_displaymiscstats_checkbox.GetValue())
+ self.config['security']=int(self.security_checkbox.GetValue())
+ self.config['auto_kick']=int(self.autokick_checkbox.GetValue())
+ buffering=int(self.buffering_checkbox.GetValue())
+ self.config['buffer_reads']=buffering
+ if buffering:
+ self.config['write_buffer_size']=self.defaults['write_buffer_size']
+ else:
+ self.config['write_buffer_size']=0
+ self.config['breakup_seed_bitfield']=int(self.breakup_checkbox.GetValue())
+ if self.autoflush_checkbox.GetValue():
+ self.config['auto_flush']=5
+ else:
+ self.config['auto_flush']=0
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.config['ipv6_enabled']=int(self.ipv6enabled_checkbox.GetValue())
+ self.config['gui_forcegreenonfirewall']=int(self.gui_forcegreenonfirewall_checkbox.GetValue())
+ self.config['minport']=self.minport_data.GetValue()
+ self.config['maxport']=self.maxport_data.GetValue()
+ self.config['random_port']=int(self.randomport_checkbox.GetValue())
+ self.config['gui_font']=self.gui_font_data.GetValue()
+ self.config['gui_ratesettingsdefault']=self.gui_ratesettingsdefault_data.GetStringSelection()
+ self.config['max_download_rate']=self.maxdownload_data.GetValue()
+ self.config['gui_ratesettingsmode']=self.gui_ratesettingsmode_data.GetStringSelection()
+ self.config['gui_default_savedir']=self.gui_default_savedir_ctrl.GetValue()
+ self.config['gui_saveas_ask']=1-self.gui_savemode_data.GetSelection()
+ self.config['gui_checkingcolor']=ColorToHex(self.checkingcolor)
+ self.config['gui_downloadcolor']=ColorToHex(self.downloadcolor)
+ self.config['gui_seedingcolor']=ColorToHex(self.seedingcolor)
+
+ if (sys.platform == 'win32'):
+ self.config['win32_taskbar_icon']=int(self.win32_taskbar_icon_checkbox.GetValue())
+# self.config['upnp_nat_access']=int(self.upnp_checkbox.GetValue())
+ self.config['upnp_nat_access']=self.upnp_data.GetSelection()
+
+ if self.advancedConfig:
+ for key,val in self.advancedConfig.items():
+ self.config[key] = val
+
+ self.writeConfigFile()
+ self._configReset = True
+ self.Close()
+ except:
+ self.parent.exception()
+
+ def cancelConfigs(evt, self = self):
+ self.Close()
+
+ def savepath_set(evt, self = self):
+ try:
+ d = self.gui_default_savedir_ctrl.GetValue()
+ if d == '':
+ d = self.config['last_saved']
+ dl = wxDirDialog(self.panel, 'Choose a default directory to save to',
+ d, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+ if dl.ShowModal() == wxID_OK:
+ self.gui_default_savedir_ctrl.SetValue(dl.GetPath())
+ except:
+ self.parent.exception()
+
+ def checkingcoloricon_set(evt, self = self):
+ try:
+ newcolor = self.getColorFromUser(self.panel,self.checkingcolor)
+ self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, newcolor)
+ self.checkingcolor = newcolor
+ except:
+ self.parent.exception()
+
+ def downloadcoloricon_set(evt, self = self):
+ try:
+ newcolor = self.getColorFromUser(self.panel,self.downloadcolor)
+ self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, newcolor)
+ self.downloadcolor = newcolor
+ except:
+ self.parent.exception()
+
+ def seedingcoloricon_set(evt, self = self):
+ try:
+ newcolor = self.getColorFromUser(self.panel,self.seedingcolor)
+ self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, newcolor)
+ self.seedingcolor = newcolor
+ except:
+ self.parent.exception()
+
+ EVT_BUTTON(self.configMenuBox, saveButton.GetId(), saveConfigs)
+ EVT_BUTTON(self.configMenuBox, cancelButton.GetId(), cancelConfigs)
+ EVT_BUTTON(self.configMenuBox, defaultsButton.GetId(), setDefaults)
+ EVT_BUTTON(self.configMenuBox, advancedButton.GetId(), self.advancedMenu)
+ EVT_BUTTON(self.configMenuBox, savepathButton.GetId(), savepath_set)
+ EVT_LEFT_DOWN(self.checkingcolor_iconptr, checkingcoloricon_set)
+ EVT_LEFT_DOWN(self.downloadcolor_iconptr, downloadcoloricon_set)
+ EVT_LEFT_DOWN(self.seedingcolor_iconptr, seedingcoloricon_set)
+
+ self.configMenuBox.Show ()
+ border.Fit(panel)
+ self.configMenuBox.Fit()
+ except:
+ self.parent.exception()
+
+
+ def Close(self):
+ self.CloseAdvanced()
+ if self.configMenuBox is not None:
+ try:
+ self.configMenuBox.Close ()
+ except wxPyDeadObjectError, e:
+ pass
+ self.configMenuBox = None
+
+ def advancedMenu(self, event = None):
+ try:
+ if not self.advancedConfig:
+ for key in ['ip', 'bind', 'min_peers', 'max_initiate', 'display_interval',
+ 'alloc_type', 'alloc_rate', 'max_files_open', 'max_connections', 'super_seeder',
+ 'ipv6_binds_v4', 'double_check', 'triple_check', 'lock_files', 'lock_while_reading',
+ 'expire_cache_data']:
+ self.advancedConfig[key] = self.config[key]
+
+ if (self.advancedMenuBox is not None):
+ try:
+ self.advancedMenuBox.Close ()
+ except wxPyDeadObjectError, e:
+ self.advancedMenuBox = None
+
+ self.advancedMenuBox = wxFrame(None, -1, 'BitTorrent Advanced Preferences', size = (1,1),
+ style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+ if (sys.platform == 'win32'):
+ self.advancedMenuBox.SetIcon(self.icon)
+
+ panel = wxPanel(self.advancedMenuBox, -1)
+# self.panel = panel
+
+ def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+ x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+ x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+ if color is not None:
+ x.SetForegroundColour(color)
+ return x
+
+ colsizer = wxFlexGridSizer(cols = 1, hgap = 13, vgap = 13)
+ warningtext = StaticText('CHANGE THESE SETTINGS AT YOUR OWN RISK', self.FONT+4, True, 'Red')
+ colsizer.Add(warningtext, 1, wxALIGN_CENTER)
+
+ self.ip_data = wxTextCtrl(parent = panel, id = -1,
+ value = self.advancedConfig['ip'],
+ size = (self.FONT*13, int(self.FONT*2.2)), style = wxTE_PROCESS_TAB)
+ self.ip_data.SetFont(self.default_font)
+
+ self.bind_data = wxTextCtrl(parent = panel, id = -1,
+ value = self.advancedConfig['bind'],
+ size = (self.FONT*13, int(self.FONT*2.2)), style = wxTE_PROCESS_TAB)
+ self.bind_data.SetFont(self.default_font)
+
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.ipv6bindsv4_data=wxChoice(panel, -1,
+ choices = ['separate sockets', 'single socket'])
+ self.ipv6bindsv4_data.SetFont(self.default_font)
+ self.ipv6bindsv4_data.SetSelection(self.advancedConfig['ipv6_binds_v4'])
+
+ self.minpeers_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+ self.minpeers_data.SetFont(self.default_font)
+ self.minpeers_data.SetRange(10,100)
+ self.minpeers_data.SetValue(self.advancedConfig['min_peers'])
+ # max_initiate = 2*minpeers
+
+ self.displayinterval_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+ self.displayinterval_data.SetFont(self.default_font)
+ self.displayinterval_data.SetRange(100,2000)
+ self.displayinterval_data.SetValue(int(self.advancedConfig['display_interval']*1000))
+
+ self.alloctype_data=wxChoice(panel, -1,
+ choices = ['normal', 'background', 'pre-allocate', 'sparse'])
+ self.alloctype_data.SetFont(self.default_font)
+ self.alloctype_data.SetStringSelection(self.advancedConfig['alloc_type'])
+
+ self.allocrate_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7,-1))
+ self.allocrate_data.SetFont(self.default_font)
+ self.allocrate_data.SetRange(1,100)
+ self.allocrate_data.SetValue(int(self.advancedConfig['alloc_rate']))
+
+ self.locking_data=wxChoice(panel, -1,
+ choices = ['no locking', 'lock while writing', 'lock always'])
+ self.locking_data.SetFont(self.default_font)
+ if self.advancedConfig['lock_files']:
+ if self.advancedConfig['lock_while_reading']:
+ self.locking_data.SetSelection(2)
+ else:
+ self.locking_data.SetSelection(1)
+ else:
+ self.locking_data.SetSelection(0)
+
+ self.doublecheck_data=wxChoice(panel, -1,
+ choices = ['no extra checking', 'double-check', 'triple-check'])
+ self.doublecheck_data.SetFont(self.default_font)
+ if self.advancedConfig['double_check']:
+ if self.advancedConfig['triple_check']:
+ self.doublecheck_data.SetSelection(2)
+ else:
+ self.doublecheck_data.SetSelection(1)
+ else:
+ self.doublecheck_data.SetSelection(0)
+
+ self.maxfilesopen_choices = ['50', '100', '200', 'no limit ']
+ self.maxfilesopen_data=wxChoice(panel, -1, choices = self.maxfilesopen_choices)
+ self.maxfilesopen_data.SetFont(self.default_font)
+ setval = self.advancedConfig['max_files_open']
+ if setval == 0:
+ setval = 'no limit '
+ else:
+ setval = str(setval)
+ if not setval in self.maxfilesopen_choices:
+ setval = self.maxfilesopen_choices[0]
+ self.maxfilesopen_data.SetStringSelection(setval)
+
+ self.maxconnections_choices = ['no limit ', '20', '30', '40', '50', '60', '100', '200']
+ self.maxconnections_data=wxChoice(panel, -1, choices = self.maxconnections_choices)
+ self.maxconnections_data.SetFont(self.default_font)
+ setval = self.advancedConfig['max_connections']
+ if setval == 0:
+ setval = 'no limit '
+ else:
+ setval = str(setval)
+ if not setval in self.maxconnections_choices:
+ setval = self.maxconnections_choices[0]
+ self.maxconnections_data.SetStringSelection(setval)
+
+ self.superseeder_data=wxChoice(panel, -1,
+ choices = ['normal', 'super-seed'])
+ self.superseeder_data.SetFont(self.default_font)
+ self.superseeder_data.SetSelection(self.advancedConfig['super_seeder'])
+
+ self.expirecache_choices = ['never ', '3', '5', '7', '10', '15', '30', '60', '90']
+ self.expirecache_data=wxChoice(panel, -1, choices = self.expirecache_choices)
+ setval = self.advancedConfig['expire_cache_data']
+ if setval == 0:
+ setval = 'never '
+ else:
+ setval = str(setval)
+ if not setval in self.expirecache_choices:
+ setval = self.expirecache_choices[0]
+ self.expirecache_data.SetFont(self.default_font)
+ self.expirecache_data.SetStringSelection(setval)
+
+
+ twocolsizer = wxFlexGridSizer(cols = 2, hgap = 20)
+ datasizer = wxFlexGridSizer(cols = 2, vgap = 2)
+ datasizer.Add(StaticText('Local IP: '), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.ip_data)
+ datasizer.Add(StaticText('IP to bind to: '), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.bind_data)
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ datasizer.Add(StaticText('IPv6 socket handling: '), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.ipv6bindsv4_data)
+ datasizer.Add(StaticText('Minimum number of peers: '), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.minpeers_data)
+ datasizer.Add(StaticText('Display interval (ms): '), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.displayinterval_data)
+ datasizer.Add(StaticText('Disk allocation type:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.alloctype_data)
+ datasizer.Add(StaticText('Allocation rate (MiB/s):'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.allocrate_data)
+ datasizer.Add(StaticText('File locking:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.locking_data)
+ datasizer.Add(StaticText('Extra data checking:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.doublecheck_data)
+ datasizer.Add(StaticText('Max files open:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.maxfilesopen_data)
+ datasizer.Add(StaticText('Max peer connections:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.maxconnections_data)
+ datasizer.Add(StaticText('Default seeding mode:'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.superseeder_data)
+ datasizer.Add(StaticText('Expire resume data(days):'), 1, wxALIGN_CENTER_VERTICAL)
+ datasizer.Add(self.expirecache_data)
+
+ twocolsizer.Add(datasizer)
+
+ infosizer = wxFlexGridSizer(cols = 1)
+ self.hinttext = StaticText('', self.FONT, False, 'Blue')
+ infosizer.Add(self.hinttext, 1, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL)
+ infosizer.SetMinSize((180,100))
+ twocolsizer.Add(infosizer, 1, wxEXPAND)
+
+ colsizer.Add(twocolsizer)
+
+ savesizer = wxGridSizer(cols = 3, hgap = 20)
+ okButton = wxButton(panel, -1, 'OK')
+# okButton.SetFont(self.default_font)
+ savesizer.Add(okButton, 0, wxALIGN_CENTER)
+
+ cancelButton = wxButton(panel, -1, 'Cancel')
+# cancelButton.SetFont(self.default_font)
+ savesizer.Add(cancelButton, 0, wxALIGN_CENTER)
+
+ defaultsButton = wxButton(panel, -1, 'Revert to Defaults')
+# defaultsButton.SetFont(self.default_font)
+ savesizer.Add(defaultsButton, 0, wxALIGN_CENTER)
+ colsizer.Add(savesizer, 1, wxALIGN_CENTER)
+
+ resizewarningtext=StaticText('None of these settings will take effect until the next time you start BitTorrent', self.FONT-2)
+ colsizer.Add(resizewarningtext, 1, wxALIGN_CENTER)
+
+ border = wxBoxSizer(wxHORIZONTAL)
+ border.Add(colsizer, 1, wxEXPAND | wxALL, 4)
+
+ panel.SetSizer(border)
+ panel.SetAutoLayout(True)
+
+ def setDefaults(evt, self = self):
+ try:
+ self.ip_data.SetValue(self.defaults['ip'])
+ self.bind_data.SetValue(self.defaults['bind'])
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.ipv6bindsv4_data.SetSelection(self.defaults['ipv6_binds_v4'])
+ self.minpeers_data.SetValue(self.defaults['min_peers'])
+ self.displayinterval_data.SetValue(int(self.defaults['display_interval']*1000))
+ self.alloctype_data.SetStringSelection(self.defaults['alloc_type'])
+ self.allocrate_data.SetValue(int(self.defaults['alloc_rate']))
+ if self.defaults['lock_files']:
+ if self.defaults['lock_while_reading']:
+ self.locking_data.SetSelection(2)
+ else:
+ self.locking_data.SetSelection(1)
+ else:
+ self.locking_data.SetSelection(0)
+ if self.defaults['double_check']:
+ if self.defaults['triple_check']:
+ self.doublecheck_data.SetSelection(2)
+ else:
+ self.doublecheck_data.SetSelection(1)
+ else:
+ self.doublecheck_data.SetSelection(0)
+ setval = self.defaults['max_files_open']
+ if setval == 0:
+ setval = 'no limit '
+ else:
+ setval = str(setval)
+ if not setval in self.maxfilesopen_choices:
+ setval = self.maxfilesopen_choices[0]
+ self.maxfilesopen_data.SetStringSelection(setval)
+ setval = self.defaults['max_connections']
+ if setval == 0:
+ setval = 'no limit '
+ else:
+ setval = str(setval)
+ if not setval in self.maxconnections_choices:
+ setval = self.maxconnections_choices[0]
+ self.maxconnections_data.SetStringSelection(setval)
+ self.superseeder_data.SetSelection(int(self.defaults['super_seeder']))
+ setval = self.defaults['expire_cache_data']
+ if setval == 0:
+ setval = 'never '
+ else:
+ setval = str(setval)
+ if not setval in self.expirecache_choices:
+ setval = self.expirecache_choices[0]
+ self.expirecache_data.SetStringSelection(setval)
+ except:
+ self.parent.exception()
+
+ def saveConfigs(evt, self = self):
+ try:
+ self.advancedConfig['ip'] = self.ip_data.GetValue()
+ self.advancedConfig['bind'] = self.bind_data.GetValue()
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ self.advancedConfig['ipv6_binds_v4'] = self.ipv6bindsv4_data.GetSelection()
+ self.advancedConfig['min_peers'] = self.minpeers_data.GetValue()
+ self.advancedConfig['display_interval'] = float(self.displayinterval_data.GetValue())/1000
+ self.advancedConfig['alloc_type'] = self.alloctype_data.GetStringSelection()
+ self.advancedConfig['alloc_rate'] = float(self.allocrate_data.GetValue())
+ self.advancedConfig['lock_files'] = int(self.locking_data.GetSelection() >= 1)
+ self.advancedConfig['lock_while_reading'] = int(self.locking_data.GetSelection() > 1)
+ self.advancedConfig['double_check'] = int(self.doublecheck_data.GetSelection() >= 1)
+ self.advancedConfig['triple_check'] = int(self.doublecheck_data.GetSelection() > 1)
+ try:
+ self.advancedConfig['max_files_open'] = int(self.maxfilesopen_data.GetStringSelection())
+ except: # if it ain't a number, it must be "no limit"
+ self.advancedConfig['max_files_open'] = 0
+ try:
+ self.advancedConfig['max_connections'] = int(self.maxconnections_data.GetStringSelection())
+ self.advancedConfig['max_initiate'] = min(
+ 2*self.advancedConfig['min_peers'], self.advancedConfig['max_connections'])
+ except: # if it ain't a number, it must be "no limit"
+ self.advancedConfig['max_connections'] = 0
+ self.advancedConfig['max_initiate'] = 2*self.advancedConfig['min_peers']
+ self.advancedConfig['super_seeder']=int(self.superseeder_data.GetSelection())
+ try:
+ self.advancedConfig['expire_cache_data'] = int(self.expirecache_data.GetStringSelection())
+ except:
+ self.advancedConfig['expire_cache_data'] = 0
+ self.advancedMenuBox.Close()
+ except:
+ self.parent.exception()
+
+ def cancelConfigs(evt, self = self):
+ self.advancedMenuBox.Close()
+
+ def ip_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nThe IP reported to the tracker.\n' +
+ 'unless the tracker is on the\n' +
+ 'same intranet as this client,\n' +
+ 'the tracker will autodetect the\n' +
+ "client's IP and ignore this\n" +
+ "value.")
+
+ def bind_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nThe IP the client will bind to.\n' +
+ 'Only useful if your machine is\n' +
+ 'directly handling multiple IPs.\n' +
+ "If you don't know what this is,\n" +
+ "leave it blank.")
+
+ def ipv6bindsv4_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nCertain operating systems will\n' +
+ 'open IPv4 protocol connections on\n' +
+ 'an IPv6 socket; others require you\n' +
+ "to open two sockets on the same\n" +
+ "port, one IPv4 and one IPv6.")
+
+ def minpeers_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nThe minimum number of peers the\n' +
+ 'client tries to stay connected\n' +
+ 'with. Do not set this higher\n' +
+ 'unless you have a very fast\n' +
+ "connection and a lot of system\n" +
+ "resources.")
+
+ def displayinterval_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nHow often to update the\n' +
+ 'graphical display, in 1/1000s\n' +
+ 'of a second. Setting this too low\n' +
+ "will strain your computer's\n" +
+ "processor and video access.")
+
+ def alloctype_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\nHow to allocate disk space.\n' +
+ 'normal allocates space as data is\n' +
+ 'received, background also adds\n' +
+ "space in the background, pre-\n" +
+ "allocate reserves up front, and\n" +
+ 'sparse is only for filesystems\n' +
+ 'that support it by default.')
+
+ def allocrate_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nAt what rate to allocate disk\n' +
+ 'space when allocating in the\n' +
+ 'background. Set this too high on a\n' +
+ "slow filesystem and your download\n" +
+ "will slow to a crawl.")
+
+ def locking_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\n\nFile locking prevents other\n' +
+ 'programs (including other instances\n' +
+ 'of BitTorrent) from accessing files\n' +
+ "you are downloading.")
+
+ def doublecheck_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nHow much extra checking to do\n' +
+ 'making sure no data is corrupted.\n' +
+ 'Double-check mode uses more CPU,\n' +
+ "while triple-check mode increases\n" +
+ "disk accesses.")
+
+ def maxfilesopen_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\n\nThe maximum number of files to\n' +
+ 'keep open at the same time. Zero\n' +
+ 'means no limit. Please note that\n' +
+ "if this option is in effect,\n" +
+ "files are not guaranteed to be\n" +
+ "locked.")
+
+ def maxconnections_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\nSome operating systems, most\n' +
+ 'notably Windows 9x/ME combined\n' +
+ 'with certain network drivers,\n' +
+ "cannot handle more than a certain\n" +
+ "number of open ports. If the\n" +
+ "client freezes, try setting this\n" +
+ "to 60 or below.")
+
+ def superseeder_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\nThe "super-seed" method allows\n' +
+ 'a single source to more efficiently\n' +
+ 'seed a large torrent, but is not\n' +
+ "necessary in a well-seeded torrent,\n" +
+ "and causes problems with statistics.\n" +
+ "Unless you routinely seed torrents\n" +
+ "you can enable this by selecting\n" +
+ '"SUPER-SEED" for connection type.\n' +
+ '(once enabled it does not turn off.)')
+
+ def expirecache_hint(evt, self = self):
+ self.hinttext.SetLabel('\n\nThe client stores temporary data\n' +
+ 'in order to handle downloading only\n' +
+ 'specific files from the torrent and\n' +
+ "so it can resume downloads more\n" +
+ "quickly. This sets how long the\n" +
+ "client will keep this data before\n" +
+ "deleting it to free disk space.")
+
+ EVT_BUTTON(self.advancedMenuBox, okButton.GetId(), saveConfigs)
+ EVT_BUTTON(self.advancedMenuBox, cancelButton.GetId(), cancelConfigs)
+ EVT_BUTTON(self.advancedMenuBox, defaultsButton.GetId(), setDefaults)
+ EVT_ENTER_WINDOW(self.ip_data, ip_hint)
+ EVT_ENTER_WINDOW(self.bind_data, bind_hint)
+ if sys.version_info >= (2,3) and socket.has_ipv6:
+ EVT_ENTER_WINDOW(self.ipv6bindsv4_data, ipv6bindsv4_hint)
+ EVT_ENTER_WINDOW(self.minpeers_data, minpeers_hint)
+ EVT_ENTER_WINDOW(self.displayinterval_data, displayinterval_hint)
+ EVT_ENTER_WINDOW(self.alloctype_data, alloctype_hint)
+ EVT_ENTER_WINDOW(self.allocrate_data, allocrate_hint)
+ EVT_ENTER_WINDOW(self.locking_data, locking_hint)
+ EVT_ENTER_WINDOW(self.doublecheck_data, doublecheck_hint)
+ EVT_ENTER_WINDOW(self.maxfilesopen_data, maxfilesopen_hint)
+ EVT_ENTER_WINDOW(self.maxconnections_data, maxconnections_hint)
+ EVT_ENTER_WINDOW(self.superseeder_data, superseeder_hint)
+ EVT_ENTER_WINDOW(self.expirecache_data, expirecache_hint)
+
+ self.advancedMenuBox.Show ()
+ border.Fit(panel)
+ self.advancedMenuBox.Fit()
+ except:
+ self.parent.exception()
+
+
+ def CloseAdvanced(self):
+ if self.advancedMenuBox is not None:
+ try:
+ self.advancedMenuBox.Close()
+ except wxPyDeadObjectError, e:
+ self.advancedMenuBox = None
+
diff --git a/BitTornado/ConnChoice.py b/BitTornado/ConnChoice.py
new file mode 100644
index 000000000..6a086d5a2
--- /dev/null
+++ b/BitTornado/ConnChoice.py
@@ -0,0 +1,31 @@
+connChoices=(
+ {'name':'automatic',
+ 'rate':{'min':0, 'max':5000, 'def': 0},
+ 'conn':{'min':0, 'max':100, 'def': 0},
+ 'automatic':1},
+ {'name':'unlimited',
+ 'rate':{'min':0, 'max':5000, 'def': 0, 'div': 50},
+ 'conn':{'min':4, 'max':100, 'def': 4}},
+ {'name':'dialup/isdn',
+ 'rate':{'min':3, 'max': 8, 'def': 5},
+ 'conn':{'min':2, 'max': 3, 'def': 2},
+ 'initiate': 12},
+ {'name':'dsl/cable slow',
+ 'rate':{'min':10, 'max': 48, 'def': 13},
+ 'conn':{'min':4, 'max': 20, 'def': 4}},
+ {'name':'dsl/cable fast',
+ 'rate':{'min':20, 'max': 100, 'def': 40},
+ 'conn':{'min':4, 'max': 30, 'def': 6}},
+ {'name':'T1',
+ 'rate':{'min':100, 'max': 300, 'def':150},
+ 'conn':{'min':4, 'max': 40, 'def':10}},
+ {'name':'T3+',
+ 'rate':{'min':400, 'max':2000, 'def':500},
+ 'conn':{'min':4, 'max':100, 'def':20}},
+ {'name':'seeder',
+ 'rate':{'min':0, 'max':5000, 'def':0, 'div': 50},
+ 'conn':{'min':1, 'max':100, 'def':1}},
+ {'name':'SUPER-SEED', 'super-seed':1}
+ )
+
+connChoiceList = map(lambda x:x['name'], connChoices)
diff --git a/BitTornado/CreateIcons.py b/BitTornado/CreateIcons.py
new file mode 100644
index 000000000..a61fc3d49
--- /dev/null
+++ b/BitTornado/CreateIcons.py
@@ -0,0 +1,105 @@
+# Generated from bt_MakeCreateIcons - 05/10/04 22:15:33
+# T-0.3.0 (BitTornado)
+
+from binascii import a2b_base64
+from zlib import decompress
+from os.path import join
+
+icons = {
+ "icon_bt.ico":
+ "eJyt1K+OFEEQx/FaQTh5GDRZhSQpiUHwCrxCBYXFrjyJLXeXEARPsZqUPMm+" +
+ "AlmP+PGtngoLDji69zMz2zt/qqtr1mxHv7621d4+MnvK/jl66Bl2drV+e7Wz" +
+ "S/v12A7rY4fDtuvOwfF4tOPXo52/fLLz+WwpWd6nqRXHKXux39sTrtnjNd7g" +
+ "PW7wGSd860f880kffjvJ2QYS1Zcw4AjcoaA5yRFIFDQXOgKJguZmjkCioB4T" +
+ "Y2CqxpTXA7sHEgVNEC8RSBQ0gfk7xtknCupgk3EEEgXlNgFHIFHQTMoRSBQ0" +
+ "E+1ouicKmsk7AomCJiGOQKKgSZIjkChoEucIJAqaZDoCiYImwb4iydULmqQ7" +
+ "AomC1kLcEQ/jSBQ0i+MIJAqaBXMEElVdi9siOgKJgmZhfWWlVjTddXW/FtsR" +
+ "SBQ0BeAIJAqaonAEEgVNoTgCiYKmeByBREHaqiVWRtSRrAJzBBIFTdE5AomC" +
+ "phBPpxPP57dVkDfrTl063nUVnWe383fZx9tb3uN+o7U+BLDtuvcQm8d/27Y/" +
+ "jO3o5/ay+YPv/+f6y30e1OyB7QcsGWFj",
+ "icon_done.ico":
+ "eJyt1K2OVEEQhuEaQbJyMWgyCklSEoPgFvYWKigsduRKbLndhCC4itGk5Erm" +
+ "Fsh4xMdbfSoMOGDpnuf89Jyf6uqaMdvRr69ttbdPzJ6xf4Eeeo6dXa3vXu/s" +
+ "0n49tsP62OGw7bpzcDwe7fj1aOcvn+x8PltKlg9pasVxyl7u9/aUe/Z4gxu8" +
+ "xy0+44Rv/Yp/vujDbxc520Ci+hYGHIF7FDQXOQKJguZGRyBR0DzMEUgU1GNi" +
+ "DEzVmPJ6YfdAoqAJ4hUCiYImMH/HOPtEQR1sMo5AoqDcJuAIJAqaSTkCiYJm" +
+ "oh1N90RBM3lHIFHQJMQRSBQ0SXIEEgVN4hyBREGTTEcgUdAk2FckuXpBk3RH" +
+ "IFHQWoh74mEciYJmcRyBREGzYI5AoqprcVtERyBR0Cysr6zUiqa7rh7WYjsC" +
+ "iYKmAByBREFTFI5AoqApFEcgUdAUjyOQKEhbtcTKiDqSVWCOQKKgKTpHIFHQ" +
+ "FOLpdOL9fLcK8nY9qUvHu66i8+x2/i77eHfH77h/0VofAth23Xuoz/+2bX8Y" +
+ "29HP7WXzB+f/5/7Lcx7V7JHtB9dPG3I=",
+ "black.ico":
+ "eJzt1zsOgkAYReFLLCztjJ2UlpLY485kOS7DpbgESwqTcQZDghjxZwAfyfl0" +
+ "LIieGzUWSom/pan840rHnbSUtPHHX9Je9+tAh2ybNe8TZZ/vk8ajJ4zl6JVJ" +
+ "+xFx+0R03Djx1/2B8bcT9L/bt0+4Wq+4se8e/VTfMvGqb4n3nYiIGz+lvt9s" +
+ "9EpE2T4xJN4xNFYWU6t+JWXuXDFzTom7SodSyi/S+iwtwjlJ80KaNY/C34rW" +
+ "aT8nvK5uhF7ohn7Yqfb87kffLAAAAAAAAAAAAAAAAAAAGMUNy7dADg==",
+ "blue.ico":
+ "eJzt10EOwUAYhuGv6cLSTux06QD2dTM9jmM4iiNYdiEZ81cIFTWddtDkfbQW" +
+ "De8XogtS5h9FIf+81H4jLSSt/ekvaavrdaCDez4SZV+PpPHoicBy9ErSfkQ8" +
+ "fCI6Hjgx6f7A+McJ+r/t95i46xMP7bf8Uz9o4k0/XMT338voP5shK0MkjXcM" +
+ "YSqam6Qunatyf7Nk7iztaqk8SaujNLfzIM0qKX88ZX8rWmf7Nfa+W8N61rW+" +
+ "7TR7fverHxYAAAAAAAAAAAAAAAAAAIziApVZ444=",
+ "green.ico":
+ "eJzt1zEOgjAAheFHGBzdjJuMHsAdbybxNB7Do3gERwaT2mJIBCOWlqok/yc4" +
+ "EP1fNDIoZfZRFLLPa5120krS1p72kvZ6XAeGHLtHouzrkTQePOFZDl5J2g+I" +
+ "+08Exz0nZt2PjH+coP/bvveEaY2L+/VN13/1PSbe9v0FfP+jTP6ziVmJkTQ+" +
+ "MISZaO6SujSmyu3dkpmbdKil8iptLtLSnWdpUUn58yn3t6J39l/j3tc2XM91" +
+ "Xd/tNHt296sfFgAAAAAAAAAAAAAAAAAATOIOVLEoDg==",
+ "red.ico":
+ "eJzt10EOwUAYhuGv6cLSTux06QD2dTOO4xiO4giWXUjG/BVCRTuddtDkfbQW" +
+ "De8XogtS5h9FIf+81GEjLSSt/ekvaavbdaCVez0SZd+PpPHoicBy9ErSfkQ8" +
+ "fCI6Hjgx6f7AeOcE/d/2QyceesaD+g1/1u+e+NwPF/H99zL6z2bIyhBJ4y1D" +
+ "mIb6LqlK5/a5v1syd5F2lVSepdVJmtt5lGZ7KX8+ZX8rGmfzNfa+e8N61rW+" +
+ "7dR7fverHxYAAAAAAAAAAAAAAAAAAIziCpgs444=",
+ "white.ico":
+ "eJzt1zsOgkAYReFLKCztjJ2ULsAed6bLcRnuwYTaJVhSmIwzGBLEiD8D+EjO" +
+ "p2NB9NyosVBK/C3L5B+XOmykhaS1P/6StrpfBzoUp6J5nyj7fJ80Hj1hLEev" +
+ "TNqPiNsnouPGib/uD4y/naD/3b59wtV6xY199+in+paJV31LvO9ERNz4KfX9" +
+ "ZqNXIsr2iSHxjqGxspha9Sspc+f2qXNK3FXalVJ+kVZnaR7OUZrtpbR5FP5W" +
+ "tE77OeF1dSP0Qjf0w06153c/+mYBAAAAAAAAAAAAAAAAAMAobj//I7s=",
+ "yellow.ico":
+ "eJzt1zsOgkAYReFLKCztjJ2ULsAedybLcRkuxSVYUpiM82M0ihGHgVFJzidY" +
+ "ED03vgqlzN+KQv5+qf1GWkha+9Nf0lbX60AX556ORNnXI2k8eiKwHL2StB8R" +
+ "D5+IjgdOTLo/MP5xgv5v+8ETd/3iYf2W/+oHTLzth4t4/3sZ/WszZGWIpPGO" +
+ "IUxE8yupS+eq3H9smTtLu1oqT9LqKM3tPEizSsofT9nfitbZfow979awnnWt" +
+ "bzvNnt/96osFAAAAAAAAAAAAAAAAAACjuABhjmIs",
+ "black1.ico":
+ "eJzt0zEOgkAUANEhFpZSGTstTWzkVt5Cj8ZROAIHMNGPWBCFDYgxMZkHn2Iz" +
+ "G5YCyOLKc+K54XSANbCPiSV2tOt/qjgW3XtSnN41FH/Qv29Jx/P7qefp7W8P" +
+ "4z85HQ+9JRG/7BpTft31DPUKyiVcFjEZzQ/TTtdzrWnKmCr6evv780qSJEmS" +
+ "JEmSJEmSJEmSpPnunVFDcA==",
+ "green1.ico":
+ "eJzt0zEKwkAQRuEXLCyTSuy0DHgxb6F4shzFI+QAgpkkFoombowIwvt2Z4vh" +
+ "X5gtFrJYRUGca/Y7WAFlVLTY0vf/1elxTwqP3xoKf5B/vjIenp+fOs+r/LWT" +
+ "/uQ34aGpUqQnv+1ygDqHagnHRVRG+2H6unfrtZkq6hz5evP7eSVJkiRJkiRJ" +
+ "kiRJkiRJ0nwNoWQ+AA==",
+ "yellow1.ico":
+ "eJzt0zEKwkAQRuEXLCxNJXZaCl7MW8Sj5SgeIQcQ4oS1UDTJxkhAeN/ubDH8" +
+ "C7PFQhGrLIlzx/kEW+AYFS0OpP6/atuXPSk8fKsv/EX+/cpweH5+6jyf8kn+" +
+ "k0fCfVPlyE/+2q2CZgP1Gi6rqILuw6R69uh1mTrqGvlmv/y8kiRJkiRJkiRJ" +
+ "kiRJkiRpvjsp9L8k",
+ "alloc.gif":
+ "eJxz93SzsEw0YRBh+M4ABi0MS3ue///P8H8UjIIRBhR/sjAyMDAx6IAyAihP" +
+ "MHAcYWDlkPHYsOBgM4ewVsyJDQsPNzEoebF8CHjo0smjH3dmRsDjI33C7Dw3" +
+ "MiYuOtjNyDShRSNwyemJguJJKhaGS32nGka61Vg2NJyYKRd+bY+nwtMzjbqV" +
+ "Qh84gxMCJgnlL4vJuqJyaa5NfFLNLsNVV2a7syacfVWkHd4bv7RN1ltM7ejm" +
+ "tMtNZ19Oyb02p8C3aqr3dr2GbXl/7fZyOej5rW653WZ7MzzHZV+v7O2/EZM+" +
+ "Pt45kbX6ScWHNWfOilo3n5thucXv8org1XF3DRQYrAEWiVY3"
+}
+
+def GetIcons():
+ return icons.keys()
+
+def CreateIcon(icon, savedir):
+ try:
+ f = open(join(savedir,icon),"wb")
+ f.write(decompress(a2b_base64(icons[icon])))
+ success = 1
+ except:
+ success = 0
+ try:
+ f.close()
+ except:
+ pass
+ return success
diff --git a/BitTornado/CurrentRateMeasure.py b/BitTornado/CurrentRateMeasure.py
new file mode 100644
index 000000000..ff828aaa2
--- /dev/null
+++ b/BitTornado/CurrentRateMeasure.py
@@ -0,0 +1,37 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
+
+class Measure:
+ def __init__(self, max_rate_period, fudge = 1):
+ self.max_rate_period = max_rate_period
+ self.ratesince = clock() - fudge
+ self.last = self.ratesince
+ self.rate = 0.0
+ self.total = 0l
+
+ def update_rate(self, amount):
+ self.total += amount
+ t = clock()
+ self.rate = (self.rate * (self.last - self.ratesince) +
+ amount) / (t - self.ratesince + 0.0001)
+ self.last = t
+ if self.ratesince < t - self.max_rate_period:
+ self.ratesince = t - self.max_rate_period
+
+ def get_rate(self):
+ self.update_rate(0)
+ return self.rate
+
+ def get_rate_noupdate(self):
+ return self.rate
+
+ def time_until_rate(self, newrate):
+ if self.rate <= newrate:
+ return 0
+ t = clock() - self.ratesince
+ return ((self.rate * t) / newrate) - t
+
+ def get_total(self):
+ return self.total
\ No newline at end of file
diff --git a/BitTornado/HTTPHandler.py b/BitTornado/HTTPHandler.py
new file mode 100644
index 000000000..0aa72cd1a
--- /dev/null
+++ b/BitTornado/HTTPHandler.py
@@ -0,0 +1,167 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from sys import stdout
+import time
+from clock import clock
+from gzip import GzipFile
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+class HTTPConnection:
+ def __init__(self, handler, connection):
+ self.handler = handler
+ self.connection = connection
+ self.buf = ''
+ self.closed = False
+ self.done = False
+ self.donereading = False
+ self.next_func = self.read_type
+
+ def get_ip(self):
+ return self.connection.get_ip()
+
+ def data_came_in(self, data):
+ if self.donereading or self.next_func is None:
+ return True
+ self.buf += data
+ while True:
+ try:
+ i = self.buf.index('\n')
+ except ValueError:
+ return True
+ val = self.buf[:i]
+ self.buf = self.buf[i+1:]
+ self.next_func = self.next_func(val)
+ if self.donereading:
+ return True
+ if self.next_func is None or self.closed:
+ return False
+
+ def read_type(self, data):
+ self.header = data.strip()
+ words = data.split()
+ if len(words) == 3:
+ self.command, self.path, garbage = words
+ self.pre1 = False
+ elif len(words) == 2:
+ self.command, self.path = words
+ self.pre1 = True
+ if self.command != 'GET':
+ return None
+ else:
+ return None
+ if self.command not in ('HEAD', 'GET'):
+ return None
+ self.headers = {}
+ return self.read_header
+
+ def read_header(self, data):
+ data = data.strip()
+ if data == '':
+ self.donereading = True
+ if self.headers.get('accept-encoding','').find('gzip') > -1:
+ self.encoding = 'gzip'
+ else:
+ self.encoding = 'identity'
+ r = self.handler.getfunc(self, self.path, self.headers)
+ if r is not None:
+ self.answer(r)
+ return None
+ try:
+ i = data.index(':')
+ except ValueError:
+ return None
+ self.headers[data[:i].strip().lower()] = data[i+1:].strip()
+ if DEBUG:
+ print data[:i].strip() + ": " + data[i+1:].strip()
+ return self.read_header
+
+ def answer(self, (responsecode, responsestring, headers, data)):
+ if self.closed:
+ return
+ if self.encoding == 'gzip':
+ compressed = StringIO()
+ gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
+ gz.write(data)
+ gz.close()
+ cdata = compressed.getvalue()
+ if len(cdata) >= len(data):
+ self.encoding = 'identity'
+ else:
+ if DEBUG:
+ print "Compressed: %i Uncompressed: %i\n" % (len(cdata),len(data))
+ data = cdata
+ headers['Content-Encoding'] = 'gzip'
+
+ # i'm abusing the identd field here, but this should be ok
+ if self.encoding == 'identity':
+ ident = '-'
+ else:
+ ident = self.encoding
+ self.handler.log( self.connection.get_ip(), ident, '-',
+ self.header, responsecode, len(data),
+ self.headers.get('referer','-'),
+ self.headers.get('user-agent','-') )
+ self.done = True
+ r = StringIO()
+ r.write('HTTP/1.0 ' + str(responsecode) + ' ' +
+ responsestring + '\r\n')
+ if not self.pre1:
+ headers['Content-Length'] = len(data)
+ for key, value in headers.items():
+ r.write(key + ': ' + str(value) + '\r\n')
+ r.write('\r\n')
+ if self.command != 'HEAD':
+ r.write(data)
+ self.connection.write(r.getvalue())
+ if self.connection.is_flushed():
+ self.connection.shutdown(1)
+
+class HTTPHandler:
+ def __init__(self, getfunc, minflush):
+ self.connections = {}
+ self.getfunc = getfunc
+ self.minflush = minflush
+ self.lastflush = clock()
+
+ def external_connection_made(self, connection):
+ self.connections[connection] = HTTPConnection(self, connection)
+
+ def connection_flushed(self, connection):
+ if self.connections[connection].done:
+ connection.shutdown(1)
+
+ def connection_lost(self, connection):
+ ec = self.connections[connection]
+ ec.closed = True
+ del ec.connection
+ del ec.next_func
+ del self.connections[connection]
+
+ def data_came_in(self, connection, data):
+ c = self.connections[connection]
+ if not c.data_came_in(data) and not c.closed:
+ c.connection.shutdown(1)
+
+ def log(self, ip, ident, username, header,
+ responsecode, length, referrer, useragent):
+ year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
+ print '%s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
+ ip, ident, username, day, months[month], year, hour,
+ minute, second, header, responsecode, length, referrer, useragent)
+ t = clock()
+ if t - self.lastflush > self.minflush:
+ self.lastflush = t
+ stdout.flush()
diff --git a/BitTornado/PSYCO.py b/BitTornado/PSYCO.py
new file mode 100644
index 000000000..36c7bd997
--- /dev/null
+++ b/BitTornado/PSYCO.py
@@ -0,0 +1,5 @@
+# edit this file to enable/disable Psyco
+# psyco = 1 -- enabled
+# psyco = 0 -- disabled
+
+psyco = 0
diff --git a/BitTornado/RateLimiter.py b/BitTornado/RateLimiter.py
new file mode 100644
index 000000000..94cd9bfd8
--- /dev/null
+++ b/BitTornado/RateLimiter.py
@@ -0,0 +1,153 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from traceback import print_exc
+from binascii import b2a_hex
+from clock import clock
+from CurrentRateMeasure import Measure
+from cStringIO import StringIO
+from math import sqrt
+
# Compatibility shims for pre-2.3 Pythons: supply the bool constants and the
# sum() builtin when the interpreter lacks them.
try:
    True
except:
    True = 1
    False = 0
try:
    sum([1])
except:
    # sum() appeared in Python 2.3; emulate with reduce().
    sum = lambda a: reduce(lambda x,y: x+y, a, 0)
+
DEBUG = False

# Tuning constants for the upload rate limiter and its automatic mode.
MAX_RATE_PERIOD = 20.0          # seconds of history kept by the rate Measure
MAX_RATE = 10e10                # sentinel meaning "effectively unlimited"
PING_BOUNDARY = 1.2             # ping delay above this counts as "slow"
PING_SAMPLES = 7                # samples used per adjustment cycle
PING_DISCARDS = 1               # leading samples dropped each cycle
PING_THRESHHOLD = 5             # slow pings per cycle that imply flooding
PING_DELAY = 5 # cycles 'til first upward adjustment
PING_DELAY_NEXT = 2 # 'til next
ADJUST_UP = 1.05                # multiplicative rate increase factor
ADJUST_DOWN = 0.95              # multiplicative rate decrease factor
UP_DELAY_FIRST = 5
UP_DELAY_NEXT = 2
SLOTS_STARTING = 6
SLOTS_FACTOR = 1.66/1000        # upload slots per (byte/s) of rate
+
class RateLimiter:
    """Round-robin upload scheduler enforcing an aggregate upload-rate cap.

    Uploadable connections form a circular singly-linked list through each
    connection's ``next_upload`` attribute, with ``self.last`` pointing at the
    ring's tail.  ``try_send`` walks the ring sending ``unitsize`` chunks while
    the time-based byte budget lasts.  In automatic mode (``set_upload_rate``
    with a negative rate), ``ping`` tunes ``upload_rate`` from ping-delay
    samples and recomputes the number of upload slots.
    """

    def __init__(self, sched, unitsize, slotsfunc = lambda x: None):
        self.sched = sched              # callback scheduler: sched(func, delay)
        self.last = None                # tail of the circular upload ring
        self.unitsize = unitsize        # bytes per send_partial() call
        self.slotsfunc = slotsfunc      # notified when the slot count changes
        self.measure = Measure(MAX_RATE_PERIOD)
        self.autoadjust = False
        self.upload_rate = MAX_RATE * 1000
        self.slots = SLOTS_STARTING # garbage if not automatic

    def set_upload_rate(self, rate):
        # rate = -1 # test automatic
        # Negative rate enables automatic adjustment; 0 means unlimited.
        # Note the *1000: rates are supplied in units 1000x smaller than the
        # internal accounting (presumably kB/s in, B/s stored).
        if rate < 0:
            if self.autoadjust:
                return
            self.autoadjust = True
            self.autoadjustup = 0
            self.pings = []
            rate = MAX_RATE
            self.slots = SLOTS_STARTING
            self.slotsfunc(self.slots)
        else:
            self.autoadjust = False
        if not rate:
            rate = MAX_RATE
        self.upload_rate = rate * 1000
        self.lasttime = clock()
        self.bytes_sent = 0

    def queue(self, conn):
        # Insert conn into the circular ring after the current tail.
        assert conn.next_upload is None
        if self.last is None:
            # Ring was empty: conn points at itself and sending starts now.
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            self.last = conn

    def try_send(self, check_time = False):
        # Replenish the byte budget for the elapsed time, then drain the ring.
        # bytes_sent is a debt counter: sending is allowed while it is <= 0.
        t = clock()
        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
        self.lasttime = t
        if check_time:
            self.bytes_sent = max(self.bytes_sent, 0)
        cur = self.last.next_upload
        while self.bytes_sent <= 0:
            bytes = cur.send_partial(self.unitsize)
            self.bytes_sent += bytes
            self.measure.update_rate(bytes)
            if bytes == 0 or cur.backlogged():
                # Connection has nothing to send (or is backlogged): unlink it.
                if self.last is cur:
                    self.last = None
                    cur.next_upload = None
                    break
                else:
                    self.last.next_upload = cur.next_upload
                    cur.next_upload = None
                    cur = self.last.next_upload
            else:
                # Advance the tail so the ring rotates fairly.
                self.last = cur
                cur = cur.next_upload
        else:
            # Budget exhausted without emptying the ring: reschedule when the
            # debt will have been paid off at the current rate.
            self.sched(self.try_send, self.bytes_sent / self.upload_rate)

    def adjust_sent(self, bytes):
        # Account for bytes sent outside try_send; cap the debt at 3 seconds
        # worth of traffic so a long pause cannot build unbounded credit.
        self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3)
        self.measure.update_rate(bytes)


    def ping(self, delay):
        # Feed one ping-delay sample to the automatic rate adjuster.
        if DEBUG:
            print delay
        if not self.autoadjust:
            return
        self.pings.append(delay > PING_BOUNDARY)
        if len(self.pings) < PING_SAMPLES+PING_DISCARDS:
            return
        if DEBUG:
            print 'cycle'
        # Count "slow" pings, ignoring the first PING_DISCARDS samples.
        pings = sum(self.pings[PING_DISCARDS:])
        del self.pings[:]
        if pings >= PING_THRESHHOLD: # assume flooded
            if self.upload_rate == MAX_RATE:
                self.upload_rate = self.measure.get_rate()*ADJUST_DOWN
            else:
                self.upload_rate = min(self.upload_rate,
                                       self.measure.get_rate()*1.1)
            self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN),2)
            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust down to '+str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_FIRST
        else: # not flooded
            if self.upload_rate == MAX_RATE:
                return
            # Only adjust upward every autoadjustup cycles.
            self.autoadjustup -= 1
            if self.autoadjustup:
                return
            self.upload_rate = int(self.upload_rate*ADJUST_UP)
            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust up to '+str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_NEXT
+
+
+
+
diff --git a/BitTornado/RateMeasure.py b/BitTornado/RateMeasure.py
new file mode 100644
index 000000000..d1f8e37ac
--- /dev/null
+++ b/BitTornado/RateMeasure.py
@@ -0,0 +1,75 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
# Pre-2.3 compatibility: define bool constants if missing.
try:
    True
except:
    True = 1
    False = 0

# Per-second exponential decay applied to the accumulated byte/time totals.
FACTOR = 0.999
+
class RateMeasure:
    """Exponentially-decayed download-rate estimator with a smoothed
    time-remaining prediction.

    ``update`` decays the accumulated byte/time totals by FACTOR once per
    whole elapsed second, and ``get_time_left`` damps the raw ``left/rate``
    estimate so the displayed ETA does not jitter.
    """

    def __init__(self):
        self.last = None            # clock() of the most recent update
        self.time = 1.0             # decayed elapsed-time accumulator
        self.got = 0.0              # decayed byte accumulator
        self.rate = 0
        self.remaining = None       # smoothed ETA handed back to callers
        self.last_checked = None
        self.got_anything = False   # becomes True on the first byte seen
        self.lastten = False        # once ETA <= 10s, report it raw
        self.broke = False

    def data_came_in(self, amount):
        if self.got_anything:
            self.update(amount)
            return
        # First data seen: just start the clock, nothing to average yet.
        self.got_anything = True
        self.last = clock()

    def data_rejected(self, amount):
        # Rejected data contributes nothing to the rate estimate.
        pass

    def get_time_left(self, left):
        """Return a smoothed ETA (seconds) for `left` remaining bytes,
        or None when no estimate is possible yet."""
        now = clock()
        if not self.got_anything:
            return None
        if now - self.last > 15:
            # Stalled transfer: decay the rate with a zero-byte update.
            self.update(0)
        try:
            est = left / self.rate
            if est <= 10 and not self.lastten:
                self.lastten = True
            if self.lastten:
                # In the home stretch, report the raw estimate immediately.
                return est
            # Only re-anchor the ETA when it drifts outside a jitter band;
            # otherwise just count it down with wall time.
            jitter = max(est / 20, 2)
            if self.remaining is None or abs(self.remaining - est) > jitter:
                self.remaining = est
            else:
                self.remaining -= now - self.last_checked
        except ZeroDivisionError:
            self.remaining = None
        if self.remaining is not None and self.remaining < 0.1:
            self.remaining = 0.1
        self.last_checked = now
        return self.remaining

    def update(self, amount):
        now = clock()
        # One decay factor per whole second elapsed since the last update.
        for _ in range(int(self.last), int(now)):
            self.time *= FACTOR
            self.got *= FACTOR
        self.got += amount
        if now - self.last < 20:
            self.time += now - self.last
        self.last = now
        try:
            self.rate = self.got / self.time
        except ZeroDivisionError:
            pass
diff --git a/BitTornado/RawServer.py b/BitTornado/RawServer.py
new file mode 100644
index 000000000..bbf1cb1df
--- /dev/null
+++ b/BitTornado/RawServer.py
@@ -0,0 +1,195 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from bisect import insort
+from SocketHandler import SocketHandler, UPnP_ERROR
+import socket
+from cStringIO import StringIO
+from traceback import print_exc
+from select import error
+from threading import Thread, Event
+from time import sleep
+from clock import clock
+import sys
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+
def autodetect_ipv6():
    """Return 1 if this interpreter/platform can create IPv6 sockets, else 0.

    Probes by actually constructing an AF_INET6 stream socket; any failure
    (pre-2.3 Python, no OS/interpreter IPv6 support) means "no IPv6".
    """
    try:
        assert sys.version_info >= (2,3)
        assert socket.has_ipv6
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        # Bug fix: the probe socket was previously leaked; close it so
        # repeated calls don't consume file descriptors.
        s.close()
    except:
        return 0
    return 1
+
def autodetect_socket_style():
    """Return 1 for single-socket (dual-stack) style, 0 for dual-socket style.

    Non-Linux platforms are assumed dual-stack.  On Linux the kernel's
    bindv6only flag decides; if it cannot be read, dual-socket is assumed.
    """
    if sys.platform.find('linux') < 0:
        return 1
    try:
        f = open('/proc/sys/net/ipv6/bindv6only', 'r')
        try:
            bindv6only = int(f.read())
        finally:
            f.close()
        return int(not bindv6only)
    except:
        return 0
+
+
# Maximum number of bytes requested from a socket per read.
READSIZE = 100000
+
class RawServer:
    """Poll-based event loop.

    Owns a SocketHandler for the actual socket work, a sorted schedule of
    timed callbacks (``funcs``), a thread-safe staging list for tasks added
    from outside the loop (``externally_added``), and the shutdown/exception
    reporting plumbing.  ``listen_forever`` runs until ``doneflag`` is set.
    """

    def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True,
                 ipv6_enable = True, failfunc = lambda x: None, errorfunc = None,
                 sockethandler = None, excflag = Event()):
        self.timeout_check_interval = timeout_check_interval
        self.timeout = timeout
        self.servers = {}
        self.single_sockets = {}
        self.dead_from_write = []
        self.doneflag = doneflag
        self.noisy = noisy
        self.failfunc = failfunc
        self.errorfunc = errorfunc
        self.exccount = 0
        self.funcs = []             # sorted list of (when, func, id) tasks
        self.externally_added = []  # tasks queued from outside the loop thread
        self.finished = Event()
        self.tasks_to_kill = []
        self.excflag = excflag

        if sockethandler is None:
            sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE)
        self.sockethandler = sockethandler
        self.add_task(self.scan_for_timeouts, timeout_check_interval)

    def get_exception_flag(self):
        return self.excflag

    def _add_task(self, func, delay, id = None):
        # Internal: insert directly into the sorted schedule (loop thread only).
        assert float(delay) >= 0
        insort(self.funcs, (clock() + delay, func, id))

    def add_task(self, func, delay = 0, id = None):
        # Public: stage the task; it is merged into the schedule by the loop.
        assert float(delay) >= 0
        self.externally_added.append((func, delay, id))

    def scan_for_timeouts(self):
        # Self-rescheduling periodic sweep for idle sockets.
        self.add_task(self.scan_for_timeouts, self.timeout_check_interval)
        self.sockethandler.scan_for_timeouts()

    def bind(self, port, bind = '', reuse = False,
             ipv6_socket_style = 1, upnp = False):
        self.sockethandler.bind(port, bind, reuse, ipv6_socket_style, upnp)

    def find_and_bind(self, minport, maxport, bind = '', reuse = False,
                      ipv6_socket_style = 1, upnp = 0, randomizer = False):
        # Returns the port actually bound; raises socket.error if none work.
        return self.sockethandler.find_and_bind(minport, maxport, bind, reuse,
                                                ipv6_socket_style, upnp, randomizer)

    def start_connection_raw(self, dns, socktype, handler = None):
        return self.sockethandler.start_connection_raw(dns, socktype, handler)

    def start_connection(self, dns, handler = None, randomize = False):
        return self.sockethandler.start_connection(dns, handler, randomize)

    def get_stats(self):
        return self.sockethandler.get_stats()

    def pop_external(self):
        # Merge externally staged tasks into the sorted schedule.
        while self.externally_added:
            (a, b, c) = self.externally_added.pop(0)
            self._add_task(a, b, c)


    def listen_forever(self, handler):
        """Main loop: poll sockets, run due tasks, dispatch events; returns
        when doneflag is set or after repeated unrecoverable exceptions."""
        self.sockethandler.set_handler(handler)
        try:
            while not self.doneflag.isSet():
                try:
                    self.pop_external()
                    self._kill_tasks()
                    # Sleep until the next scheduled task (or "forever").
                    if self.funcs:
                        period = self.funcs[0][0] + 0.001 - clock()
                    else:
                        period = 2 ** 30
                    if period < 0:
                        period = 0
                    events = self.sockethandler.do_poll(period)
                    if self.doneflag.isSet():
                        return
                    # Run every task whose due time has arrived.
                    while self.funcs and self.funcs[0][0] <= clock():
                        garbage1, func, id = self.funcs.pop(0)
                        # NOTE(review): this `pass` looks like it was meant to
                        # skip killed tasks (continue); as written the task
                        # still runs — confirm intent before changing.
                        if id in self.tasks_to_kill:
                            pass
                        try:
#                            print func.func_name
                            func()
                        except (SystemError, MemoryError), e:
                            self.failfunc(str(e))
                            return
                        except KeyboardInterrupt:
#                            self.exception(True)
                            return
                        except:
                            if self.noisy:
                                self.exception()
                    self.sockethandler.close_dead()
                    self.sockethandler.handle_events(events)
                    if self.doneflag.isSet():
                        return
                    self.sockethandler.close_dead()
                except (SystemError, MemoryError), e:
                    self.failfunc(str(e))
                    return
                except error:
                    # select.error: ignore unless we are shutting down.
                    if self.doneflag.isSet():
                        return
                except KeyboardInterrupt:
#                    self.exception(True)
                    return
                except:
                    self.exception()
                    if self.exccount > 10:
                        return
        finally:
#            self.sockethandler.shutdown()
            self.finished.set()

    def is_finished(self):
        return self.finished.isSet()

    def wait_until_finished(self):
        self.finished.wait()

    def _kill_tasks(self):
        # Drop every scheduled task whose id was registered via kill_tasks().
        if self.tasks_to_kill:
            new_funcs = []
            for (t, func, id) in self.funcs:
                if id not in self.tasks_to_kill:
                    new_funcs.append((t, func, id))
            self.funcs = new_funcs
            self.tasks_to_kill = []

    def kill_tasks(self, id):
        self.tasks_to_kill.append(id)

    def exception(self, kbint = False):
        # Record and report an exception raised inside the loop.
        if not kbint:
            self.excflag.set()
        self.exccount += 1
        if self.errorfunc is None:
            print_exc()
        else:
            data = StringIO()
            print_exc(file = data)
#            print data.getvalue()   # report exception here too
            if not kbint:           # don't report here if it's a keyboard interrupt
                self.errorfunc(data.getvalue())

    def shutdown(self):
        self.sockethandler.shutdown()
diff --git a/BitTornado/ServerPortHandler.py b/BitTornado/ServerPortHandler.py
new file mode 100644
index 000000000..90d42b5a7
--- /dev/null
+++ b/BitTornado/ServerPortHandler.py
@@ -0,0 +1,188 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+#from RawServer import RawServer
# Pre-2.3 compatibility: define bool constants if missing.
try:
    True
except:
    True = 1
    False = 0

from BT1.Encrypter import protocol_name

# Sentinel default for SingleRawServer.add_task's id argument, meaning
# "tag the task with this torrent's info_hash".
default_task_id = []
+
class SingleRawServer:
    """Per-torrent facade over a shared RawServer.

    Presents the RawServer interface (add_task / start_connection /
    start_listening) while routing everything through the MultiHandler's
    underlying server, tagging scheduled tasks with this torrent's
    info_hash so they can be killed on shutdown.
    """

    def __init__(self, info_hash, multihandler, doneflag, protocol):
        self.multihandler = multihandler
        self.rawserver = multihandler.rawserver
        self.info_hash = info_hash
        self.doneflag = doneflag
        self.protocol = protocol
        self.handler = None
        self.finished = False
        self.running = False
        self.taskqueue = []

    def shutdown(self):
        # Public shutdown: route through the MultiHandler, which will call
        # _shutdown() and unregister this torrent.
        if self.finished:
            return
        self.multihandler.shutdown_torrent(self.info_hash)

    def _shutdown(self):
        # Internal teardown invoked by MultiHandler.shutdown_torrent().
        if self.finished:
            return
        self.finished = True
        self.running = False
        self.rawserver.kill_tasks(self.info_hash)
        if self.handler:
            self.handler.close_all()

    def _external_connection_made(self, c, options, already_read):
        if not self.running:
            return
        c.set_handler(self.handler)
        self.handler.externally_handshaked_connection_made(
            c, options, already_read)

    ### RawServer functions ###

    def add_task(self, func, delay=0, id = default_task_id):
        # default_task_id is a sentinel meaning "use this torrent's hash".
        if id is default_task_id:
            id = self.info_hash
        if not self.finished:
            self.rawserver.add_task(func, delay, id)

    def start_connection(self, dns, handler = None):
        return self.rawserver.start_connection(dns, handler or self.handler)

    def start_listening(self, handler):
        # Registers the handler and marks the torrent live; returns the
        # shutdown callable (it does not block like RawServer.listen_forever).
        self.handler = handler
        self.running = True
        return self.shutdown

    def is_finished(self):
        return self.finished

    def get_exception_flag(self):
        return self.rawserver.get_exception_flag()
+
+
class NewSocketHandler: # hand a new socket off where it belongs
    """Temporary handler for a freshly accepted socket: parses just enough of
    the BitTorrent handshake (protocol string, reserved bytes, info_hash) to
    route the connection to the matching SingleRawServer, closing it after 15
    seconds if no complete handshake arrives."""

    def __init__(self, multihandler, connection):
        self.multihandler = multihandler
        self.connection = connection
        connection.set_handler(self)
        self.closed = False
        self.buffer = StringIO()
        self.complete = False
        # Parser state: how many bytes the next step needs, and its callback.
        self.next_len, self.next_func = 1, self.read_header_len
        self.multihandler.rawserver.add_task(self._auto_close, 15)

    def _auto_close(self):
        # Give up on connections that never complete a handshake.
        if not self.complete:
            self.close()

    def close(self):
        if not self.closed:
            self.connection.close()
            self.closed = True


# header format:
#        connection.write(chr(len(protocol_name)) + protocol_name +
#        (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id)

    # copied from Encrypter and modified

    def read_header_len(self, s):
        # One byte: length of the protocol-name string that follows.
        l = ord(s)
        return l, self.read_header

    def read_header(self, s):
        self.protocol = s
        return 8, self.read_reserved

    def read_reserved(self, s):
        self.options = s
        return 20, self.read_download_id

    def read_download_id(self, s):
        # s is the info_hash; True means "routable", None aborts.
        if self.multihandler.singlerawservers.has_key(s):
            if self.multihandler.singlerawservers[s].protocol == self.protocol:
                return True
        return None

    def read_dead(self, s):
        return None

    def data_came_in(self, garbage, s):
        # Accumulate bytes until the current parser step has next_len of them,
        # then invoke the step; each step returns (next_len, next_func),
        # True (handshake complete -> hand off), or None (abort).
        while True:
            if self.closed:
                return
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            self.buffer.reset()
            self.buffer.truncate()
            try:
                x = self.next_func(m)
            except:
                # Poison the parser so later data is ignored, then re-raise.
                self.next_len, self.next_func = 1, self.read_dead
                raise
            if x is None:
                self.close()
                return
            if x == True:       # ready to process
                # m is the info_hash here; pass any unconsumed bytes along.
                self.multihandler.singlerawservers[m]._external_connection_made(
                    self.connection, self.options, s)
                self.complete = True
                return
            self.next_len, self.next_func = x

    def connection_flushed(self, ss):
        pass

    def connection_lost(self, ss):
        self.closed = True
+
class MultiHandler:
    """Multiplexes many torrents over one RawServer.

    Each torrent gets a SingleRawServer facade; brand-new sockets are parked
    in a NewSocketHandler until their handshake reveals which torrent (and
    therefore which SingleRawServer) they belong to.
    """

    def __init__(self, rawserver, doneflag):
        self.rawserver = rawserver
        self.masterdoneflag = doneflag
        self.singlerawservers = {}
        self.connections = {}
        self.taskqueues = {}

    def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
        # Create and register the per-torrent facade.
        srs = SingleRawServer(info_hash, self, doneflag, protocol)
        self.singlerawservers[info_hash] = srs
        return srs

    def shutdown_torrent(self, info_hash):
        self.singlerawservers[info_hash]._shutdown()
        del self.singlerawservers[info_hash]

    def listen_forever(self):
        # Blocks in the shared RawServer loop; on exit, tear down every torrent.
        self.rawserver.listen_forever(self)
        for srs in self.singlerawservers.values():
            srs.finished = True
            srs.running = False
            srs.doneflag.set()

    ### RawServer handler functions ###
    # be wary of name collisions

    def external_connection_made(self, ss):
        # Park the socket until its handshake identifies the torrent.
        NewSocketHandler(self, ss)
diff --git a/BitTornado/SocketHandler.py b/BitTornado/SocketHandler.py
new file mode 100644
index 000000000..7a4372d7d
--- /dev/null
+++ b/BitTornado/SocketHandler.py
@@ -0,0 +1,375 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+import socket
+from errno import EWOULDBLOCK, ECONNREFUSED, EHOSTUNREACH
+try:
+ from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
+ timemult = 1000
+except ImportError:
+ from selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
+ timemult = 1
+from time import sleep
+from clock import clock
+import sys
+from random import shuffle, randrange
+from natpunch import UPnP_open_port, UPnP_close_port
+# from BT1.StreamCheck import StreamCheck
+# import inspect
# Pre-2.3 compatibility: define bool constants if missing.
try:
    True
except:
    True = 1
    False = 0

# Poll-event mask meaning "readable or writable".
# NOTE(review): this shadows the all() builtin; renaming would require
# touching every use in this module.
all = POLLIN | POLLOUT

UPnP_ERROR = "unable to forward port via UPnP"
+
class SingleSocket:
    """Wrapper around one non-blocking socket: buffered writes, idle-time
    tracking, and liveness bookkeeping for its owning SocketHandler."""

    def __init__(self, socket_handler, sock, handler, ip = None):
        self.socket_handler = socket_handler
        self.socket = sock
        self.handler = handler
        self.buffer = []            # pending outgoing data chunks
        self.last_hit = clock()     # last activity time, for timeout scans
        self.fileno = sock.fileno()
        self.connected = False
        self.skipped = 0            # consecutive zero-byte/failed sends
#        self.check = StreamCheck()
        try:
            self.ip = self.socket.getpeername()[0]
        except:
            # Not yet connected (or already dead): fall back to the caller's ip.
            if ip is None:
                self.ip = 'unknown'
            else:
                self.ip = ip

    def get_ip(self, real=False):
        # real=True re-queries the socket in case the peer address is now known.
        if real:
            try:
                self.ip = self.socket.getpeername()[0]
            except:
                pass
        return self.ip

    def close(self):
        '''
        for x in xrange(5,0,-1):
            try:
                f = inspect.currentframe(x).f_code
                print (f.co_filename,f.co_firstlineno,f.co_name)
                del f
            except:
                pass
        print ''
        '''
        assert self.socket
        self.connected = False
        sock = self.socket
        self.socket = None
        self.buffer = []
        # Unregister from the owner before actually closing the fd.
        del self.socket_handler.single_sockets[self.fileno]
        self.socket_handler.poll.unregister(sock)
        sock.close()

    def shutdown(self, val):
        self.socket.shutdown(val)

    def is_flushed(self):
        return not self.buffer

    def write(self, s):
#        self.check.write(s)
        assert self.socket is not None
        self.buffer.append(s)
        # Only kick off a send when the buffer was previously empty; otherwise
        # the pending POLLOUT event will drain it.
        if len(self.buffer) == 1:
            self.try_write()

    def try_write(self):
        # Drain as much of the buffer as the socket accepts; after 3
        # consecutive stalled/failed sends the socket is declared dead and
        # queued for closing by the SocketHandler.
        if self.connected:
            dead = False
            try:
                while self.buffer:
                    buf = self.buffer[0]
                    amount = self.socket.send(buf)
                    if amount == 0:
                        self.skipped += 1
                        break
                    self.skipped = 0
                    if amount != len(buf):
                        # Partial send: keep the unsent tail at the front.
                        self.buffer[0] = buf[amount:]
                        break
                    del self.buffer[0]
            except socket.error, e:
                try:
                    dead = e[0] != EWOULDBLOCK
                except:
                    dead = True
                self.skipped += 1
            if self.skipped >= 3:
                dead = True
            if dead:
                self.socket_handler.dead_from_write.append(self)
                return
        # Ask for POLLOUT only while there is still buffered data.
        if self.buffer:
            self.socket_handler.poll.register(self.socket, all)
        else:
            self.socket_handler.poll.register(self.socket, POLLIN)

    def set_handler(self, handler):
        self.handler = handler
+
class SocketHandler:
    """Owns all sockets: listening servers, outgoing/incoming connections
    (as SingleSocket wrappers), the poll object, and timeout/teardown logic."""

    def __init__(self, timeout, ipv6_enable, readsize = 100000):
        self.timeout = timeout          # idle seconds before a socket is closed
        self.ipv6_enable = ipv6_enable
        self.readsize = readsize
        self.poll = poll()
        # {socket: SingleSocket}
        self.single_sockets = {}
        self.dead_from_write = []       # sockets that died during try_write()
        self.max_connects = 1000
        self.port_forwarded = None      # port opened via UPnP, if any
        self.servers = {}               # fileno -> listening server socket

    def scan_for_timeouts(self):
        # Close every connection idle longer than self.timeout.
        t = clock() - self.timeout
        tokill = []
        for s in self.single_sockets.values():
            if s.last_hit < t:
                tokill.append(s)
        for k in tokill:
            if k.socket is not None:
                self._close_socket(k)

    def bind(self, port, bind = '', reuse = False, ipv6_socket_style = 1, upnp = 0):
        """Open listening server socket(s) on `port`; raises socket.error on
        failure (closing any partially opened servers first)."""
        port = int(port)
        addrinfos = []
        self.servers = {}
        self.interfaces = []
        # if bind != "" treat it as a comma separated list and bind to all
        # addresses (can be ips or hostnames) else bind to default ipv6 and
        # ipv4 address
        if bind:
            if self.ipv6_enable:
                socktype = socket.AF_UNSPEC
            else:
                socktype = socket.AF_INET
            bind = bind.split(',')
            for addr in bind:
                if sys.version_info < (2,2):
                    addrinfos.append((socket.AF_INET, None, None, None, (addr, port)))
                else:
                    addrinfos.extend(socket.getaddrinfo(addr, port,
                                               socktype, socket.SOCK_STREAM))
        else:
            if self.ipv6_enable:
                addrinfos.append([socket.AF_INET6, None, None, None, ('', port)])
            if not addrinfos or ipv6_socket_style != 0:
                addrinfos.append([socket.AF_INET, None, None, None, ('', port)])
        for addrinfo in addrinfos:
            try:
                server = socket.socket(addrinfo[0], socket.SOCK_STREAM)
                if reuse:
                    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                server.setblocking(0)
                server.bind(addrinfo[4])
                self.servers[server.fileno()] = server
                if bind:
                    self.interfaces.append(server.getsockname()[0])
                server.listen(64)
                self.poll.register(server, POLLIN)
            except socket.error, e:
                # Roll back every server opened so far before reporting.
                for server in self.servers.values():
                    try:
                        server.close()
                    except:
                        pass
                if self.ipv6_enable and ipv6_socket_style == 0 and self.servers:
                    raise socket.error('blocked port (may require ipv6_binds_v4 to be set)')
                raise socket.error(str(e))
        if not self.servers:
            raise socket.error('unable to open server port')
        if upnp:
            if not UPnP_open_port(port):
                for server in self.servers.values():
                    try:
                        server.close()
                    except:
                        pass
                self.servers = None
                self.interfaces = None
                raise socket.error(UPnP_ERROR)
            self.port_forwarded = port
        self.port = port

    def find_and_bind(self, minport, maxport, bind = '', reuse = False,
                      ipv6_socket_style = 1, upnp = 0, randomizer = False):
        """Try ports in [minport, maxport] (at most 20, optionally shuffled)
        until bind() succeeds; returns the bound port or raises the last
        socket.error."""
        e = 'maxport less than minport - no ports to check'
        if maxport-minport < 50 or not randomizer:
            portrange = range(minport, maxport+1)
            if randomizer:
                shuffle(portrange)
                portrange = portrange[:20]  # check a maximum of 20 ports
        else:
            portrange = []
            while len(portrange) < 20:
                listen_port = randrange(minport, maxport+1)
                if not listen_port in portrange:
                    portrange.append(listen_port)
        for listen_port in portrange:
            try:
                self.bind(listen_port, bind,
                               ipv6_socket_style = ipv6_socket_style, upnp = upnp)
                return listen_port
            except socket.error, e:
                pass
        raise socket.error(str(e))


    def set_handler(self, handler):
        self.handler = handler


    def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None):
        # Begin a non-blocking connect to dns = (host, port); the socket is
        # registered with poll immediately and wrapped in a SingleSocket.
        if handler is None:
            handler = self.handler
        sock = socket.socket(socktype, socket.SOCK_STREAM)
        sock.setblocking(0)
        try:
            sock.connect_ex(dns)
        except socket.error:
            raise
        except Exception, e:
            raise socket.error(str(e))
        self.poll.register(sock, POLLIN)
        s = SingleSocket(self, sock, handler, dns[0])
        self.single_sockets[sock.fileno()] = s
        return s


    def start_connection(self, dns, handler = None, randomize = False):
        # Resolve dns and try each returned address until one connect starts.
        if handler is None:
            handler = self.handler
        if sys.version_info < (2,2):
            s = self.start_connection_raw(dns,socket.AF_INET,handler)
        else:
            if self.ipv6_enable:
                socktype = socket.AF_UNSPEC
            else:
                socktype = socket.AF_INET
            try:
                addrinfos = socket.getaddrinfo(dns[0], int(dns[1]),
                                               socktype, socket.SOCK_STREAM)
            except socket.error, e:
                raise
            except Exception, e:
                raise socket.error(str(e))
            if randomize:
                shuffle(addrinfos)
            for addrinfo in addrinfos:
                try:
                    s = self.start_connection_raw(addrinfo[4],addrinfo[0],handler)
                    break
                except:
                    pass
            else:
                raise socket.error('unable to connect')
        return s


    def _sleep(self):
        sleep(1)

    def handle_events(self, events):
        """Dispatch one batch of poll events: accept on server sockets, read
        and write on connection sockets, close on error/hangup."""
        for sock, event in events:
            s = self.servers.get(sock)
            if s:
                if event & (POLLHUP | POLLERR) != 0:
                    self.poll.unregister(s)
                    s.close()
                    del self.servers[sock]
                    print "lost server socket"
                elif len(self.single_sockets) < self.max_connects:
                    try:
                        newsock, addr = s.accept()
                        newsock.setblocking(0)
                        nss = SingleSocket(self, newsock, self.handler)
                        self.single_sockets[newsock.fileno()] = nss
                        self.poll.register(newsock, POLLIN)
                        self.handler.external_connection_made(nss)
                    except socket.error:
                        # accept() can fail spuriously; back off briefly.
                        self._sleep()
            else:
                s = self.single_sockets.get(sock)
                if not s:
                    continue
                # Any event on a connecting socket means the connect finished.
                s.connected = True
                if (event & (POLLHUP | POLLERR)):
                    self._close_socket(s)
                    continue
                if (event & POLLIN):
                    try:
                        s.last_hit = clock()
                        data = s.socket.recv(100000)
                        if not data:
                            self._close_socket(s)
                        else:
                            s.handler.data_came_in(s, data)
                    except socket.error, e:
                        code, msg = e
                        if code != EWOULDBLOCK:
                            self._close_socket(s)
                            continue
                if (event & POLLOUT) and s.socket and not s.is_flushed():
                    s.try_write()
                    if s.is_flushed():
                        s.handler.connection_flushed(s)

    def close_dead(self):
        # Close sockets that failed during try_write(); closing can mark more
        # sockets dead, hence the outer loop.
        while self.dead_from_write:
            old = self.dead_from_write
            self.dead_from_write = []
            for s in old:
                if s.socket:
                    self._close_socket(s)

    def _close_socket(self, s):
        s.close()
        s.handler.connection_lost(s)

    def do_poll(self, t):
        # A None result signals fd exhaustion (see selectpoll): shed 5% of
        # connections and lower the connection cap.
        r = self.poll.poll(t*timemult)
        if r is None:
            connects = len(self.single_sockets)
            to_close = int(connects*0.05)+1 # close 5% of sockets
            self.max_connects = connects-to_close
            closelist = self.single_sockets.values()
            shuffle(closelist)
            closelist = closelist[:to_close]
            for sock in closelist:
                self._close_socket(sock)
            return []
        return r

    def get_stats(self):
        return { 'interfaces': self.interfaces,
                 'port': self.port,
                 'upnp': self.port_forwarded is not None }


    def shutdown(self):
        # Close every connection and server socket; release the UPnP mapping.
        for ss in self.single_sockets.values():
            try:
                ss.close()
            except:
                pass
        for server in self.servers.values():
            try:
                server.close()
            except:
                pass
        if self.port_forwarded is not None:
            UPnP_close_port(self.port_forwarded)
+
diff --git a/BitTornado/__init__.py b/BitTornado/__init__.py
new file mode 100644
index 000000000..0802f02ac
--- /dev/null
+++ b/BitTornado/__init__.py
@@ -0,0 +1,63 @@
product_name = 'BitTornado'
version_short = 'T-0.3.17'

version = version_short+' ('+product_name+')'
report_email = version_short+'@degreez.net'

from types import StringType
from sha import sha
from time import time, clock
try:
    from os import getpid
except ImportError:
    # Platforms without getpid (e.g. some embedded builds): use a constant.
    def getpid():
        return 1

# 64-character alphabet used to pack version digits and entropy into peer IDs.
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'

# Fixed peer-ID prefix: release letter, then each dotted sub-version number
# mapped through the alphabet, padded with '-' to 6 characters.
_idprefix = version_short[0]
for subver in version_short[2:].split('.'):
    try:
        subver = int(subver)
    except:
        subver = 0
    _idprefix += mapbase64[subver]
_idprefix += ('-' * (6-len(_idprefix)))
# One-element list holding the process-wide random peer-ID suffix.
_idrandom = [None]
+
def resetPeerIDs():
    """Regenerate the 11-character random peer-ID suffix in _idrandom[0].

    Entropy sources: /dev/urandom when available, plus timing jitter from
    busy-wait loop counters and the process id, all hashed with SHA-1 and
    mapped through mapbase64.
    """
    try:
        f = open('/dev/urandom','rb')
        x = f.read(20)
        f.close()
    except:
        x = ''

    # Count how many iterations fit inside one tick of each timer to harvest
    # scheduling jitter as extra entropy.
    l1 = 0
    t = clock()
    while t == clock():
        l1 += 1
    l2 = 0
    t = long(time()*100)
    while t == long(time()*100):
        l2 += 1
    l3 = 0
    if l2 < 1000:
        # NOTE(review): t is taken from time() but compared against clock();
        # looks like one of the two was meant throughout.  Harmless here since
        # this loop only gathers timing entropy, but worth confirming.
        t = long(time()*10)
        while t == long(clock()*10):
            l3 += 1
    x += ( repr(time()) + '/' + str(time()) + '/'
           + str(l1) + '/' + str(l2) + '/' + str(l3) + '/'
           + str(getpid()) )

    # Map the last 11 digest bytes into the 64-character alphabet.
    s = ''
    for i in sha(x).digest()[-11:]:
        s += mapbase64[ord(i) & 0x3F]
    _idrandom[0] = s
+
# Seed the random peer-ID suffix once at import time.
resetPeerIDs()

def createPeerID(ins = '---'):
    """Return a 20-byte peer ID: 6-char version prefix + 3-char client tag
    (`ins`) + 11 chars of per-process randomness."""
    assert type(ins) is StringType
    assert len(ins) == 3
    return _idprefix + ins + _idrandom[0]
diff --git a/BitTornado/bencode.py b/BitTornado/bencode.py
new file mode 100644
index 000000000..b4f9bb85c
--- /dev/null
+++ b/BitTornado/bencode.py
@@ -0,0 +1,319 @@
+# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
+# see LICENSE.txt for license information
+
+from types import IntType, LongType, StringType, ListType, TupleType, DictType
+try:
+ from types import BooleanType
+except ImportError:
+ BooleanType = None
+try:
+ from types import UnicodeType
+except ImportError:
+ UnicodeType = None
+from cStringIO import StringIO
+
def decode_int(x, f):
    """Decode a bencoded integer "i<digits>e" whose 'i' sits at offset f.

    Returns (value, offset just past the trailing 'e'); raises ValueError
    for non-canonical forms ("-0", leading zeros).
    """
    f += 1
    end = x.index('e', f)
    try:
        n = int(x[f:end])
    except:
        n = long(x[f:end])
    if x[f] == '-':
        # "-0" is not canonical bencoding.
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and end != f + 1:
        # Leading zeros are not canonical bencoding.
        raise ValueError
    return (n, end + 1)
+
def decode_string(x, f):
    """Decode a bencoded string "<len>:<bytes>" starting at offset f.

    Returns (string, offset just past the last byte); raises ValueError when
    the length has a non-canonical leading zero.
    """
    sep = x.index(':', f)
    try:
        n = int(x[f:sep])
    except (OverflowError, ValueError):
        n = long(x[f:sep])
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + n], start + n)
+
def decode_unicode(x, f):
    # 'u'-prefixed string: decode the raw bytes, then interpret them as UTF-8.
    raw, end = decode_string(x, f + 1)
    return (raw.decode('UTF-8'), end)
+
def decode_list(x, f):
    # 'l' <item>... 'e': dispatch each item on its first character.
    items = []
    f += 1
    while x[f] != 'e':
        value, f = decode_func[x[f]](x, f)
        items.append(value)
    return (items, f + 1)
+
def decode_dict(x, f):
    # 'd' (<key><value>)... 'e': keys are strings in strictly ascending order.
    result = {}
    f += 1
    lastkey = None
    while x[f] != 'e':
        key, f = decode_string(x, f)
        # Rejects duplicate and misordered keys (canonical bencoding only).
        if lastkey >= key:
            raise ValueError
        lastkey = key
        result[key], f = decode_func[x[f]](x, f)
    return (result, f + 1)
+
# Dispatch table: first character of a bencoded value -> decoder function
# ('l' list, 'd' dict, 'i' integer, any digit begins a length-prefixed string).
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
#decode_func['u'] = decode_unicode
+
def bdecode(x, sloppy = 0):
    """Decode one complete bencoded value from x.

    Raises ValueError on malformed data; unless `sloppy` is true, trailing
    bytes after the value are also an error.
    """
    try:
        value, consumed = decode_func[x[0]](x, 0)
#    except (IndexError, KeyError):
    except (IndexError, KeyError, ValueError):
        raise ValueError("bad bencoded data")
    if not sloppy and consumed != len(x):
        raise ValueError("bad bencoded data")
    return value
+
def test_bdecode():
    """Self-test for bdecode(): every malformed input raises ValueError and
    every well-formed input round-trips to the expected Python value."""
    malformed = [
        '0:0:', 'ie', 'i341foo382e', 'i-0e', 'i123', '', 'i6easd',
        '35208734823ljdahflajhdf', '2:abfdjslhfld', '02:xy', 'l',
        'leanfdldjfh', 'relwjhrlewjh', 'd', 'defoobar', 'd3:fooe',
        'di1e0:e', 'd1:b0:1:a0:e', 'd1:a0:1:a0:e', 'i03e', 'l01:ae',
        '9999:x', 'l0:', 'd0:0:', 'd0:',
    ]
    for s in malformed:
        try:
            bdecode(s)
            assert 0
        except ValueError:
            pass
    # integers
    assert bdecode('i4e') == 4
    assert bdecode('i0e') == 0
    assert bdecode('i123456789e') == 123456789
    assert bdecode('i-10e') == -10
    # strings
    assert bdecode('0:') == ''
    assert bdecode('3:abc') == 'abc'
    assert bdecode('10:1234567890') == '1234567890'
    # lists
    assert bdecode('le') == []
    assert bdecode('l0:0:0:e') == ['', '', '']
    assert bdecode('li1ei2ei3ee') == [1, 2, 3]
    assert bdecode('l3:asd2:xye') == ['asd', 'xy']
    assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
    # dicts
    assert bdecode('de') == {}
    assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
    assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {
        'spam.mp3': {'author': 'Alice', 'length': 100000}}
+
# Identity sentinel shared by every Bencached instance; encode_bencached
# verifies it before splicing pre-encoded bytes into the output.
bencached_marker = []

class Bencached:
    """Wrapper marking a string as already-bencoded so bencode() can splice
    it into the output verbatim instead of re-encoding it."""

    def __init__(self, s):
        self.marker = bencached_marker
        self.bencoded = s

BencachedType = type(Bencached(''))  # insufficient, but good as a filter
+
def encode_bencached(x, r):
    # Pre-encoded data: verify the identity marker, then splice it straight in.
    assert x.marker == bencached_marker
    r.append(x.bencoded)
+
def encode_int(x, r):
    # Integers serialize as 'i<digits>e'.
    r += ['i', str(x), 'e']
+
def encode_bool(x, r):
    # Booleans serialize as the integers 0 / 1.
    encode_int(int(x), r)
+
def encode_string(x, r):
    # Strings serialize as '<len>:<bytes>'.
    r += [str(len(x)), ':', x]
+
def encode_unicode(x, r):
    #r.append('u')
    # Encode to UTF-8 bytes, then serialize as an ordinary bencoded string.
    encode_string(x.encode('UTF-8'), r)
+
def encode_list(x, r):
    # 'l' <item>... 'e', dispatching each element on its Python type.
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')
+
def encode_dict(x, r):
    # 'd' (<key><value>)... 'e' with keys emitted in sorted order, as the
    # canonical bencoding requires.
    r.append('d')
    for key, value in sorted(x.items()):
        r += [str(len(key)), ':', key]
        encode_func[type(value)](value, r)
    r.append('e')
+
# Dispatch table: Python type -> encoder function.  Bool/unicode entries are
# only registered when the running interpreter has those types.
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
if BooleanType:
    encode_func[BooleanType] = encode_bool
if UnicodeType:
    encode_func[UnicodeType] = encode_unicode
+
def bencode(x):
    """Bencode any supported value (int/long, str, list/tuple, dict, bool,
    unicode, Bencached) and return the encoded string.

    Unsupported types print a diagnostic and fail the internal assertion.
    """
    pieces = []
    try:
        encode_func[type(x)](x, pieces)
    except:
        print("*** error *** could not encode type %s (value: %s)" % (type(x), x))
        assert 0
    return ''.join(pieces)
+
def test_bencode():
    """Self-test for bencode(): known values map to their canonical encodings
    and non-string dict keys are rejected."""
    cases = [
        (4, 'i4e'),
        (0, 'i0e'),
        (-10, 'i-10e'),
        (12345678901234567890, 'i12345678901234567890e'),
        ('', '0:'),
        ('abc', '3:abc'),
        ('1234567890', '10:1234567890'),
        ([], 'le'),
        ([1, 2, 3], 'li1ei2ei3ee'),
        ([['Alice', 'Bob'], [2, 3]], 'll5:Alice3:Bobeli2ei3eee'),
        ({}, 'de'),
        ({'age': 25, 'eyes': 'blue'}, 'd3:agei25e4:eyes4:bluee'),
        ({'spam.mp3': {'author': 'Alice', 'length': 100000}},
         'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'),
    ]
    for value, expected in cases:
        assert bencode(value) == expected
    try:
        bencode({1: 'foo'})
        assert 0
    except AssertionError:
        pass
+
+
+try:
+ import psyco
+ psyco.bind(bdecode)
+ psyco.bind(bencode)
+except ImportError:
+ pass
diff --git a/BitTornado/bitfield.py b/BitTornado/bitfield.py
new file mode 100644
index 000000000..7788b08f7
--- /dev/null
+++ b/BitTornado/bitfield.py
@@ -0,0 +1,162 @@
+# Written by Bram Cohen, Uoti Urpala, and John Hoffman
+# see LICENSE.txt for license information
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+ bool = lambda x: not not x
+
+try:
+ sum([1])
+ negsum = lambda a: len(a)-sum(a)
+except:
+ negsum = lambda a: reduce(lambda x,y: x+(not y), a, 0)
+
+def _int_to_booleans(x):
+ r = []
+ for i in range(8):
+ r.append(bool(x & 0x80))
+ x <<= 1
+ return tuple(r)
+
+lookup_table = []
+reverse_lookup_table = {}
+for i in xrange(256):
+ x = _int_to_booleans(i)
+ lookup_table.append(x)
+ reverse_lookup_table[x] = chr(i)
+
+
+class Bitfield:
+ def __init__(self, length = None, bitstring = None, copyfrom = None):
+ if copyfrom is not None:
+ self.length = copyfrom.length
+ self.array = copyfrom.array[:]
+ self.numfalse = copyfrom.numfalse
+ return
+ if length is None:
+ raise ValueError, "length must be provided unless copying from another array"
+ self.length = length
+ if bitstring is not None:
+ extra = len(bitstring) * 8 - length
+ if extra < 0 or extra >= 8:
+ raise ValueError
+ t = lookup_table
+ r = []
+ for c in bitstring:
+ r.extend(t[ord(c)])
+ if extra > 0:
+ if r[-extra:] != [0] * extra:
+ raise ValueError
+ del r[-extra:]
+ self.array = r
+ self.numfalse = negsum(r)
+ else:
+ self.array = [False] * length
+ self.numfalse = length
+
+ def __setitem__(self, index, val):
+ val = bool(val)
+ self.numfalse += self.array[index]-val
+ self.array[index] = val
+
+ def __getitem__(self, index):
+ return self.array[index]
+
+ def __len__(self):
+ return self.length
+
+ def tostring(self):
+ booleans = self.array
+ t = reverse_lookup_table
+ s = len(booleans) % 8
+ r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ]
+ if s:
+ r += t[tuple(booleans[-s:] + ([0] * (8-s)))]
+ return ''.join(r)
+
+ def complete(self):
+ return not self.numfalse
+
+
+def test_bitfield():
+ try:
+ x = Bitfield(7, 'ab')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(7, 'ab')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(9, 'abc')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(0, 'a')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(1, '')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(7, '')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(8, '')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(9, 'a')
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(7, chr(1))
+ assert False
+ except ValueError:
+ pass
+ try:
+ x = Bitfield(9, chr(0) + chr(0x40))
+ assert False
+ except ValueError:
+ pass
+ assert Bitfield(0, '').tostring() == ''
+ assert Bitfield(1, chr(0x80)).tostring() == chr(0x80)
+ assert Bitfield(7, chr(0x02)).tostring() == chr(0x02)
+ assert Bitfield(8, chr(0xFF)).tostring() == chr(0xFF)
+ assert Bitfield(9, chr(0) + chr(0x80)).tostring() == chr(0) + chr(0x80)
+ x = Bitfield(1)
+ assert x.numfalse == 1
+ x[0] = 1
+ assert x.numfalse == 0
+ x[0] = 1
+ assert x.numfalse == 0
+ assert x.tostring() == chr(0x80)
+ x = Bitfield(7)
+ assert len(x) == 7
+ x[6] = 1
+ assert x.numfalse == 6
+ assert x.tostring() == chr(0x02)
+ x = Bitfield(8)
+ x[7] = 1
+ assert x.tostring() == chr(1)
+ x = Bitfield(9)
+ x[8] = 1
+ assert x.numfalse == 8
+ assert x.tostring() == chr(0) + chr(0x80)
+ x = Bitfield(8, chr(0xC4))
+ assert len(x) == 8
+ assert x.numfalse == 5
+ assert x.tostring() == chr(0xC4)
diff --git a/BitTornado/clock.py b/BitTornado/clock.py
new file mode 100644
index 000000000..ad0cd6226
--- /dev/null
+++ b/BitTornado/clock.py
@@ -0,0 +1,27 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from time import *
+import sys
+
+_MAXFORWARD = 100
+_FUDGE = 1
+
+class RelativeTime:
+ def __init__(self):
+ self.time = time()
+ self.offset = 0
+
+ def get_time(self):
+ t = time() + self.offset
+ if t < self.time or t > self.time + _MAXFORWARD:
+ self.time += _FUDGE
+ self.offset += self.time - t
+ return self.time
+ self.time = t
+ return t
+
+if sys.platform != 'win32':
+ _RTIME = RelativeTime()
+ def clock():
+ return _RTIME.get_time()
\ No newline at end of file
diff --git a/BitTornado/download_bt1.py b/BitTornado/download_bt1.py
new file mode 100644
index 000000000..b1a584035
--- /dev/null
+++ b/BitTornado/download_bt1.py
@@ -0,0 +1,880 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from zurllib import urlopen
+from urlparse import urlparse
+from BT1.btformats import check_message
+from BT1.Choker import Choker
+from BT1.Storage import Storage
+from BT1.StorageWrapper import StorageWrapper
+from BT1.FileSelector import FileSelector
+from BT1.Uploader import Upload
+from BT1.Downloader import Downloader
+from BT1.HTTPDownloader import HTTPDownloader
+from BT1.Connecter import Connecter
+from RateLimiter import RateLimiter
+from BT1.Encrypter import Encoder
+from RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
+from BT1.Rerequester import Rerequester
+from BT1.DownloaderFeedback import DownloaderFeedback
+from RateMeasure import RateMeasure
+from CurrentRateMeasure import Measure
+from BT1.PiecePicker import PiecePicker
+from BT1.Statistics import Statistics
+from ConfigDir import ConfigDir
+from bencode import bencode, bdecode
+from natpunch import UPnP_test
+from sha import sha
+from os import path, makedirs, listdir
+from parseargs import parseargs, formatDefinitions, defaultargs
+from socket import error as socketerror
+from random import seed
+from threading import Thread, Event
+from clock import clock
+from __init__ import createPeerID
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+defaults = [
+ ('max_uploads', 0,
+ "the maximum number of uploads to allow at once."),
+ ('keepalive_interval', 120.0,
+ 'number of seconds to pause between sending keepalives'),
+ ('download_slice_size', 2 ** 14,
+ "How many bytes to query for per request."),
+ ('upload_unit_size', 1460,
+ "when limiting upload rate, how many bytes to send at a time"),
+ ('request_backlog', 10,
+ "maximum number of requests to keep in a single pipe at once."),
+ ('max_message_length', 2 ** 23,
+ "maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped."),
+ ('ip', '',
+ "ip to report you have to the tracker."),
+ ('minport', 10000, 'minimum port to listen on, counts up if unavailable'),
+ ('maxport', 60000, 'maximum port to listen on'),
+ ('random_port', 1, 'whether to choose randomly inside the port range ' +
+ 'instead of counting up linearly'),
+ ('responsefile', '',
+ 'file the server response was stored in, alternative to url'),
+ ('url', '',
+ 'url to get file from, alternative to responsefile'),
+ ('selector_enabled', 1,
+ 'whether to enable the file selector and fast resume function'),
+ ('expire_cache_data', 10,
+ 'the number of days after which you wish to expire old cache data ' +
+ '(0 = disabled)'),
+ ('priority', '',
+ 'a list of file priorities separated by commas, must be one per file, ' +
+ '0 = highest, 1 = normal, 2 = lowest, -1 = download disabled'),
+ ('saveas', '',
+ 'local file name to save the file as, null indicates query user'),
+ ('timeout', 300.0,
+ 'time to wait between closing sockets which nothing has been received on'),
+ ('timeout_check_interval', 60.0,
+ 'time to wait between checking if any connections have timed out'),
+ ('max_slice_length', 2 ** 17,
+ "maximum length slice to send to peers, larger requests are ignored"),
+ ('max_rate_period', 20.0,
+ "maximum amount of time to guess the current rate estimate represents"),
+ ('bind', '',
+ 'comma-separated list of ips/hostnames to bind to locally'),
+# ('ipv6_enabled', autodetect_ipv6(),
+ ('ipv6_enabled', 0,
+ 'allow the client to connect to peers via IPv6'),
+ ('ipv6_binds_v4', autodetect_socket_style(),
+ "set if an IPv6 server socket won't also field IPv4 connections"),
+ ('upnp_nat_access', 1,
+ 'attempt to autoconfigure a UPnP router to forward a server port ' +
+ '(0 = disabled, 1 = mode 1 [fast], 2 = mode 2 [slow])'),
+ ('upload_rate_fudge', 5.0,
+ 'time equivalent of writing to kernel-level TCP buffer, for rate adjustment'),
+ ('tcp_ack_fudge', 0.03,
+ 'how much TCP ACK download overhead to add to upload rate calculations ' +
+ '(0 = disabled)'),
+ ('display_interval', .5,
+ 'time between updates of displayed information'),
+ ('rerequest_interval', 5 * 60,
+ 'time to wait between requesting more peers'),
+ ('min_peers', 20,
+ 'minimum number of peers to not do rerequesting'),
+ ('http_timeout', 60,
+ 'number of seconds to wait before assuming that an http connection has timed out'),
+ ('max_initiate', 40,
+ 'number of peers at which to stop initiating new connections'),
+ ('check_hashes', 1,
+ 'whether to check hashes on disk'),
+ ('max_upload_rate', 0,
+ 'maximum kB/s to upload at (0 = no limit, -1 = automatic)'),
+ ('max_download_rate', 0,
+ 'maximum kB/s to download at (0 = no limit)'),
+ ('alloc_type', 'normal',
+ 'allocation type (may be normal, background, pre-allocate or sparse)'),
+ ('alloc_rate', 2.0,
+ 'rate (in MiB/s) to allocate space at using background allocation'),
+ ('buffer_reads', 1,
+ 'whether to buffer disk reads'),
+ ('write_buffer_size', 4,
+ 'the maximum amount of space to use for buffering disk writes ' +
+ '(in megabytes, 0 = disabled)'),
+ ('breakup_seed_bitfield', 1,
+ 'sends an incomplete bitfield and then fills with have messages, '
+ 'in order to get around stupid ISP manipulation'),
+ ('snub_time', 30.0,
+ "seconds to wait for data to come in over a connection before assuming it's semi-permanently choked"),
+ ('spew', 0,
+ "whether to display diagnostic info to stdout"),
+ ('rarest_first_cutoff', 2,
+ "number of downloads at which to switch from random to rarest first"),
+ ('rarest_first_priority_cutoff', 5,
+ 'the number of peers which need to have a piece before other partials take priority over rarest first'),
+ ('min_uploads', 4,
+ "the number of uploads to fill out to with extra optimistic unchokes"),
+ ('max_files_open', 20,
+ 'the maximum number of files to keep open at a time, 0 means no limit'),
+ ('round_robin_period', 30,
+ "the number of seconds between the client's switching upload targets"),
+ ('super_seeder', 0,
+ "whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)"),
+ ('security', 1,
+ "whether to enable extra security features intended to prevent abuse"),
+ ('max_connections', 50,
+ "the absolute maximum number of peers to connect with (0 = no limit)"),
+ ('auto_kick', 1,
+ "whether to allow the client to automatically kick/ban peers that send bad data"),
+ ('double_check', 1,
+ "whether to double-check data being written to the disk for errors (may increase CPU load)"),
+ ('triple_check', 0,
+ "whether to thoroughly check data being written to the disk (may slow disk access)"),
+ ('lock_files', 1,
+ "whether to lock files the client is working with"),
+ ('lock_while_reading', 0,
+ "whether to lock access to files being read"),
+ ('auto_flush', 0,
+ "minutes between automatic flushes to disk (0 = disabled)"),
+ ('dedicated_seed_id', '',
+ "code to send to tracker identifying as a dedicated seed"),
+ ]
+
+argslistheader = 'Arguments are:\n\n'
+
+
+def _failfunc(x):
+ print x
+
+# old-style downloader
+def download(params, filefunc, statusfunc, finfunc, errorfunc, doneflag, cols,
+ pathFunc = None, presets = {}, exchandler = None,
+ failed = _failfunc, paramfunc = None):
+
+ try:
+ config = parse_params(params, presets)
+ except ValueError, e:
+ failed('error: ' + str(e) + '\nrun with no args for parameter explanations')
+ return
+ if not config:
+ errorfunc(get_usage())
+ return
+
+ myid = createPeerID()
+ seed(myid)
+
+ rawserver = RawServer(doneflag, config['timeout_check_interval'],
+ config['timeout'], ipv6_enable = config['ipv6_enabled'],
+ failfunc = failed, errorfunc = exchandler)
+
+ upnp_type = UPnP_test(config['upnp_nat_access'])
+ try:
+ listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
+ config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
+ upnp = upnp_type, randomizer = config['random_port'])
+ except socketerror, e:
+ failed("Couldn't listen - " + str(e))
+ return
+
+ response = get_response(config['responsefile'], config['url'], failed)
+ if not response:
+ return
+
+ infohash = sha(bencode(response['info'])).digest()
+
+ d = BT1Download(statusfunc, finfunc, errorfunc, exchandler, doneflag,
+ config, response, infohash, myid, rawserver, listen_port)
+
+ if not d.saveAs(filefunc):
+ return
+
+ if pathFunc:
+ pathFunc(d.getFilename())
+
+ hashcheck = d.initFiles(old_style = True)
+ if not hashcheck:
+ return
+ if not hashcheck():
+ return
+ if not d.startEngine():
+ return
+ d.startRerequester()
+ d.autoStats()
+
+ statusfunc(activity = 'connecting to peers')
+
+ if paramfunc:
+ paramfunc({ 'max_upload_rate' : d.setUploadRate, # change_max_upload_rate()
+ 'max_uploads': d.setConns, # change_max_uploads()
+ 'listen_port' : listen_port, # int
+ 'peer_id' : myid, # string
+ 'info_hash' : infohash, # string
+ 'start_connection' : d._startConnection, # start_connection((, ), )
+ })
+
+ rawserver.listen_forever(d.getPortHandler())
+
+ d.shutdown()
+
+
+def parse_params(params, presets = {}):
+ config, args = parseargs(params, defaults, 0, 1, presets = presets)
+ if args:
+ if config['responsefile'] or config['url']:
+ raise ValueError,'must have responsefile or url as arg or parameter, not both'
+ if path.isfile(args[0]):
+ config['responsefile'] = args[0]
+ else:
+ try:
+ urlparse(args[0])
+ except:
+ raise ValueError, 'bad filename or url'
+ config['url'] = args[0]
+ elif (config['responsefile'] == '') == (config['url'] == ''):
+ raise ValueError, 'need responsefile or url, must have one, cannot have both'
+ return config
+
+
+def get_usage(defaults = defaults, cols = 100, presets = {}):
+ return (argslistheader + formatDefinitions(defaults, cols, presets))
+
+
+def get_response(file, url, errorfunc):
+ try:
+ if file:
+ h = open(file, 'rb')
+ try:
+ line = h.read(10) # quick test to see if responsefile contains a dict
+ front,garbage = line.split(':',1)
+ assert front[0] == 'd'
+ int(front[1:])
+ except:
+ errorfunc(file+' is not a valid responsefile')
+ return None
+ try:
+ h.seek(0)
+ except:
+ try:
+ h.close()
+ except:
+ pass
+ h = open(file, 'rb')
+ else:
+ try:
+ h = urlopen(url)
+ except:
+ errorfunc(url+' bad url')
+ return None
+ response = h.read()
+
+ except IOError, e:
+ errorfunc('problem getting response info - ' + str(e))
+ return None
+ try:
+ h.close()
+ except:
+ pass
+ try:
+ try:
+ response = bdecode(response)
+ except:
+ errorfunc("warning: bad data in responsefile")
+ response = bdecode(response, sloppy=1)
+ check_message(response)
+ except ValueError, e:
+ errorfunc("got bad file info - " + str(e))
+ return None
+
+ return response
+
+
+class BT1Download:
+ def __init__(self, statusfunc, finfunc, errorfunc, excfunc, doneflag,
+ config, response, infohash, id, rawserver, port,
+ appdataobj = None):
+ self.statusfunc = statusfunc
+ self.finfunc = finfunc
+ self.errorfunc = errorfunc
+ self.excfunc = excfunc
+ self.doneflag = doneflag
+ self.config = config
+ self.response = response
+ self.infohash = infohash
+ self.myid = id
+ self.rawserver = rawserver
+ self.port = port
+
+ self.info = self.response['info']
+ self.pieces = [self.info['pieces'][x:x+20]
+ for x in xrange(0, len(self.info['pieces']), 20)]
+ self.len_pieces = len(self.pieces)
+ self.argslistheader = argslistheader
+ self.unpauseflag = Event()
+ self.unpauseflag.set()
+ self.downloader = None
+ self.storagewrapper = None
+ self.fileselector = None
+ self.super_seeding_active = False
+ self.filedatflag = Event()
+ self.spewflag = Event()
+ self.superseedflag = Event()
+ self.whenpaused = None
+ self.finflag = Event()
+ self.rerequest = None
+ self.tcp_ack_fudge = config['tcp_ack_fudge']
+
+ self.selector_enabled = config['selector_enabled']
+ if appdataobj:
+ self.appdataobj = appdataobj
+ elif self.selector_enabled:
+ self.appdataobj = ConfigDir()
+ self.appdataobj.deleteOldCacheData( config['expire_cache_data'],
+ [self.infohash] )
+
+ self.excflag = self.rawserver.get_exception_flag()
+ self.failed = False
+ self.checking = False
+ self.started = False
+
+ self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'],
+ config['rarest_first_priority_cutoff'])
+ self.choker = Choker(config, rawserver.add_task,
+ self.picker, self.finflag.isSet)
+
+
+ def checkSaveLocation(self, loc):
+ if self.info.has_key('length'):
+ return path.exists(loc)
+ for x in self.info['files']:
+ if path.exists(path.join(loc, x['path'][0])):
+ return True
+ return False
+
+
+ def saveAs(self, filefunc, pathfunc = None):
+ try:
+ def make(f, forcedir = False):
+ if not forcedir:
+ f = path.split(f)[0]
+ if f != '' and not path.exists(f):
+ makedirs(f)
+
+ if self.info.has_key('length'):
+ file_length = self.info['length']
+ file = filefunc(self.info['name'], file_length,
+ self.config['saveas'], False)
+ if file is None:
+ return None
+ make(file)
+ files = [(file, file_length)]
+ else:
+ file_length = 0L
+ for x in self.info['files']:
+ file_length += x['length']
+ file = filefunc(self.info['name'], file_length,
+ self.config['saveas'], True)
+ if file is None:
+ return None
+
+ # if this path exists, and no files from the info dict exist, we assume it's a new download and
+ # the user wants to create a new directory with the default name
+ existing = 0
+ if path.exists(file):
+ if not path.isdir(file):
+ self.errorfunc(file + 'is not a dir')
+ return None
+ if len(listdir(file)) > 0: # if it's not empty
+ for x in self.info['files']:
+ if path.exists(path.join(file, x['path'][0])):
+ existing = 1
+ if not existing:
+ file = path.join(file, self.info['name'])
+ if path.exists(file) and not path.isdir(file):
+ if file[-8:] == '.torrent':
+ file = file[:-8]
+ if path.exists(file) and not path.isdir(file):
+ self.errorfunc("Can't create dir - " + self.info['name'])
+ return None
+ make(file, True)
+
+ # alert the UI to any possible change in path
+ if pathfunc != None:
+ pathfunc(file)
+
+ files = []
+ for x in self.info['files']:
+ n = file
+ for i in x['path']:
+ n = path.join(n, i)
+ files.append((n, x['length']))
+ make(n)
+ except OSError, e:
+ self.errorfunc("Couldn't allocate dir - " + str(e))
+ return None
+
+ self.filename = file
+ self.files = files
+ self.datalength = file_length
+
+ return file
+
+
+ def getFilename(self):
+ return self.filename
+
+
+ def _finished(self):
+ self.finflag.set()
+ try:
+ self.storage.set_readonly()
+ except (IOError, OSError), e:
+ self.errorfunc('trouble setting readonly at end - ' + str(e))
+ if self.superseedflag.isSet():
+ self._set_super_seed()
+ self.choker.set_round_robin_period(
+ max( self.config['round_robin_period'],
+ self.config['round_robin_period'] *
+ self.info['piece length'] / 200000 ) )
+ self.rerequest_complete()
+ self.finfunc()
+
+ def _data_flunked(self, amount, index):
+ self.ratemeasure_datarejected(amount)
+ if not self.doneflag.isSet():
+ self.errorfunc('piece %d failed hash check, re-downloading it' % index)
+
+ def _failed(self, reason):
+ self.failed = True
+ self.doneflag.set()
+ if reason is not None:
+ self.errorfunc(reason)
+
+
+ def initFiles(self, old_style = False, statusfunc = None):
+ if self.doneflag.isSet():
+ return None
+ if not statusfunc:
+ statusfunc = self.statusfunc
+
+ disabled_files = None
+ if self.selector_enabled:
+ self.priority = self.config['priority']
+ if self.priority:
+ try:
+ self.priority = self.priority.split(',')
+ assert len(self.priority) == len(self.files)
+ self.priority = [int(p) for p in self.priority]
+ for p in self.priority:
+ assert p >= -1
+ assert p <= 2
+ except:
+ self.errorfunc('bad priority list given, ignored')
+ self.priority = None
+
+ data = self.appdataobj.getTorrentData(self.infohash)
+ try:
+ d = data['resume data']['priority']
+ assert len(d) == len(self.files)
+ disabled_files = [x == -1 for x in d]
+ except:
+ try:
+ disabled_files = [x == -1 for x in self.priority]
+ except:
+ pass
+
+ try:
+ try:
+ self.storage = Storage(self.files, self.info['piece length'],
+ self.doneflag, self.config, disabled_files)
+ except IOError, e:
+ self.errorfunc('trouble accessing files - ' + str(e))
+ return None
+ if self.doneflag.isSet():
+ return None
+
+ self.storagewrapper = StorageWrapper(self.storage, self.config['download_slice_size'],
+ self.pieces, self.info['piece length'], self._finished, self._failed,
+ statusfunc, self.doneflag, self.config['check_hashes'],
+ self._data_flunked, self.rawserver.add_task,
+ self.config, self.unpauseflag)
+
+ except ValueError, e:
+ self._failed('bad data - ' + str(e))
+ except IOError, e:
+ self._failed('IOError - ' + str(e))
+ if self.doneflag.isSet():
+ return None
+
+ if self.selector_enabled:
+ self.fileselector = FileSelector(self.files, self.info['piece length'],
+ self.appdataobj.getPieceDir(self.infohash),
+ self.storage, self.storagewrapper,
+ self.rawserver.add_task,
+ self._failed)
+ if data:
+ data = data.get('resume data')
+ if data:
+ self.fileselector.unpickle(data)
+
+ self.checking = True
+ if old_style:
+ return self.storagewrapper.old_style_init()
+ return self.storagewrapper.initialize
+
+
+ def getCachedTorrentData(self):
+ return self.appdataobj.getTorrentData(self.infohash)
+
+
+ def _make_upload(self, connection, ratelimiter, totalup):
+ return Upload(connection, ratelimiter, totalup,
+ self.choker, self.storagewrapper, self.picker,
+ self.config)
+
+ def _kick_peer(self, connection):
+ def k(connection = connection):
+ connection.close()
+ self.rawserver.add_task(k,0)
+
+ def _ban_peer(self, ip):
+ self.encoder_ban(ip)
+
+ def _received_raw_data(self, x):
+ if self.tcp_ack_fudge:
+ x = int(x*self.tcp_ack_fudge)
+ self.ratelimiter.adjust_sent(x)
+# self.upmeasure.update_rate(x)
+
+ def _received_data(self, x):
+ self.downmeasure.update_rate(x)
+ self.ratemeasure.data_came_in(x)
+
+ def _received_http_data(self, x):
+ self.downmeasure.update_rate(x)
+ self.ratemeasure.data_came_in(x)
+ self.downloader.external_data_received(x)
+
+ def _cancelfunc(self, pieces):
+ self.downloader.cancel_piece_download(pieces)
+ self.httpdownloader.cancel_piece_download(pieces)
+ def _reqmorefunc(self, pieces):
+ self.downloader.requeue_piece_download(pieces)
+
+ def startEngine(self, ratelimiter = None, statusfunc = None):
+ if self.doneflag.isSet():
+ return False
+ if not statusfunc:
+ statusfunc = self.statusfunc
+
+ self.checking = False
+
+ for i in xrange(self.len_pieces):
+ if self.storagewrapper.do_I_have(i):
+ self.picker.complete(i)
+ self.upmeasure = Measure(self.config['max_rate_period'],
+ self.config['upload_rate_fudge'])
+ self.downmeasure = Measure(self.config['max_rate_period'])
+
+ if ratelimiter:
+ self.ratelimiter = ratelimiter
+ else:
+ self.ratelimiter = RateLimiter(self.rawserver.add_task,
+ self.config['upload_unit_size'],
+ self.setConns)
+ self.ratelimiter.set_upload_rate(self.config['max_upload_rate'])
+
+ self.ratemeasure = RateMeasure()
+ self.ratemeasure_datarejected = self.ratemeasure.data_rejected
+
+ self.downloader = Downloader(self.storagewrapper, self.picker,
+ self.config['request_backlog'], self.config['max_rate_period'],
+ self.len_pieces, self.config['download_slice_size'],
+ self._received_data, self.config['snub_time'], self.config['auto_kick'],
+ self._kick_peer, self._ban_peer)
+ self.downloader.set_download_rate(self.config['max_download_rate'])
+ self.connecter = Connecter(self._make_upload, self.downloader, self.choker,
+ self.len_pieces, self.upmeasure, self.config,
+ self.ratelimiter, self.rawserver.add_task)
+ self.encoder = Encoder(self.connecter, self.rawserver,
+ self.myid, self.config['max_message_length'], self.rawserver.add_task,
+ self.config['keepalive_interval'], self.infohash,
+ self._received_raw_data, self.config)
+ self.encoder_ban = self.encoder.ban
+
+ self.httpdownloader = HTTPDownloader(self.storagewrapper, self.picker,
+ self.rawserver, self.finflag, self.errorfunc, self.downloader,
+ self.config['max_rate_period'], self.infohash, self._received_http_data,
+ self.connecter.got_piece)
+ if self.response.has_key('httpseeds') and not self.finflag.isSet():
+ for u in self.response['httpseeds']:
+ self.httpdownloader.make_download(u)
+
+ if self.selector_enabled:
+ self.fileselector.tie_in(self.picker, self._cancelfunc,
+ self._reqmorefunc, self.rerequest_ondownloadmore)
+ if self.priority:
+ self.fileselector.set_priorities_now(self.priority)
+ self.appdataobj.deleteTorrentData(self.infohash)
+ # erase old data once you've started modifying it
+
+ if self.config['super_seeder']:
+ self.set_super_seed()
+
+ self.started = True
+ return True
+
+
+ def rerequest_complete(self):
+ if self.rerequest:
+ self.rerequest.announce(1)
+
+ def rerequest_stopped(self):
+ if self.rerequest:
+ self.rerequest.announce(2)
+
+ def rerequest_lastfailed(self):
+ if self.rerequest:
+ return self.rerequest.last_failed
+ return False
+
+ def rerequest_ondownloadmore(self):
+ if self.rerequest:
+ self.rerequest.hit()
+
+ def startRerequester(self, seededfunc = None, force_rapid_update = False):
+ if self.response.has_key('announce-list'):
+ trackerlist = self.response['announce-list']
+ else:
+ trackerlist = [[self.response['announce']]]
+
+ self.rerequest = Rerequester(trackerlist, self.config['rerequest_interval'],
+ self.rawserver.add_task, self.connecter.how_many_connections,
+ self.config['min_peers'], self.encoder.start_connections,
+ self.rawserver.add_task, self.storagewrapper.get_amount_left,
+ self.upmeasure.get_total, self.downmeasure.get_total, self.port, self.config['ip'],
+ self.myid, self.infohash, self.config['http_timeout'],
+ self.errorfunc, self.excfunc, self.config['max_initiate'],
+ self.doneflag, self.upmeasure.get_rate, self.downmeasure.get_rate,
+ self.unpauseflag, self.config['dedicated_seed_id'],
+ seededfunc, force_rapid_update )
+
+ self.rerequest.start()
+
+
+ def _init_stats(self):
+ self.statistics = Statistics(self.upmeasure, self.downmeasure,
+ self.connecter, self.httpdownloader, self.ratelimiter,
+ self.rerequest_lastfailed, self.filedatflag)
+ if self.info.has_key('files'):
+ self.statistics.set_dirstats(self.files, self.info['piece length'])
+ if self.config['spew']:
+ self.spewflag.set()
+
+ def autoStats(self, displayfunc = None):
+ if not displayfunc:
+ displayfunc = self.statusfunc
+
+ self._init_stats()
+ DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task,
+ self.upmeasure.get_rate, self.downmeasure.get_rate,
+ self.ratemeasure, self.storagewrapper.get_stats,
+ self.datalength, self.finflag, self.spewflag, self.statistics,
+ displayfunc, self.config['display_interval'])
+
+ def startStats(self):
+ self._init_stats()
+ d = DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task,
+ self.upmeasure.get_rate, self.downmeasure.get_rate,
+ self.ratemeasure, self.storagewrapper.get_stats,
+ self.datalength, self.finflag, self.spewflag, self.statistics)
+ return d.gather
+
+
+ def getPortHandler(self):
+ return self.encoder
+
+
+ def shutdown(self, torrentdata = {}):
+ if self.checking or self.started:
+ self.storagewrapper.sync()
+ self.storage.close()
+ self.rerequest_stopped()
+ if self.fileselector and self.started:
+ if not self.failed:
+ self.fileselector.finish()
+ torrentdata['resume data'] = self.fileselector.pickle()
+ try:
+ self.appdataobj.writeTorrentData(self.infohash,torrentdata)
+ except:
+ self.appdataobj.deleteTorrentData(self.infohash) # clear it
+ return not self.failed and not self.excflag.isSet()
+ # if returns false, you may wish to auto-restart the torrent
+
+
+ def setUploadRate(self, rate):
+ try:
+ def s(self = self, rate = rate):
+ self.config['max_upload_rate'] = rate
+ self.ratelimiter.set_upload_rate(rate)
+ self.rawserver.add_task(s)
+ except AttributeError:
+ pass
+
+ def setConns(self, conns, conns2 = None):
+ if not conns2:
+ conns2 = conns
+ try:
+ def s(self = self, conns = conns, conns2 = conns2):
+ self.config['min_uploads'] = conns
+ self.config['max_uploads'] = conns2
+ if (conns > 30):
+ self.config['max_initiate'] = conns + 10
+ self.rawserver.add_task(s)
+ except AttributeError:
+ pass
+
+ def setDownloadRate(self, rate):
+ try:
+ def s(self = self, rate = rate):
+ self.config['max_download_rate'] = rate
+ self.downloader.set_download_rate(rate)
+ self.rawserver.add_task(s)
+ except AttributeError:
+ pass
+
+ def startConnection(self, ip, port, id):
+ self.encoder._start_connection((ip, port), id)
+
+ def _startConnection(self, ipandport, id):
+ self.encoder._start_connection(ipandport, id)
+
+ def setInitiate(self, initiate):
+ try:
+ def s(self = self, initiate = initiate):
+ self.config['max_initiate'] = initiate
+ self.rawserver.add_task(s)
+ except AttributeError:
+ pass
+
+ def getConfig(self):
+ return self.config
+
+ def getDefaults(self):
+ return defaultargs(defaults)
+
+ def getUsageText(self):
+ return self.argslistheader
+
+ def reannounce(self, special = None):
+ try:
+ def r(self = self, special = special):
+ if special is None:
+ self.rerequest.announce()
+ else:
+ self.rerequest.announce(specialurl = special)
+ self.rawserver.add_task(r)
+ except AttributeError:
+ pass
+
+ def getResponse(self):
+ try:
+ return self.response
+ except:
+ return None
+
+# def Pause(self):
+# try:
+# if self.storagewrapper:
+# self.rawserver.add_task(self._pausemaker, 0)
+# except:
+# return False
+# self.unpauseflag.clear()
+# return True
+#
+# def _pausemaker(self):
+# self.whenpaused = clock()
+# self.unpauseflag.wait() # sticks a monkey wrench in the main thread
+#
+# def Unpause(self):
+# self.unpauseflag.set()
+# if self.whenpaused and clock()-self.whenpaused > 60:
+# def r(self = self):
+# self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds
+# self.rawserver.add_task(r)
+
+ def Pause(self):
+ if not self.storagewrapper:
+ return False
+ self.unpauseflag.clear()
+ self.rawserver.add_task(self.onPause)
+ return True
+
+ def onPause(self):
+ self.whenpaused = clock()
+ if not self.downloader:
+ return
+ self.downloader.pause(True)
+ self.encoder.pause(True)
+ self.choker.pause(True)
+
+ def Unpause(self):
+ self.unpauseflag.set()
+ self.rawserver.add_task(self.onUnpause)
+
+ def onUnpause(self):
+ if not self.downloader:
+ return
+ self.downloader.pause(False)
+ self.encoder.pause(False)
+ self.choker.pause(False)
+ if self.rerequest and self.whenpaused and clock()-self.whenpaused > 60:
+ self.rerequest.announce(3) # rerequest automatically if paused for >60 seconds
+
+ def set_super_seed(self):
+ try:
+ self.superseedflag.set()
+ def s(self = self):
+ if self.finflag.isSet():
+ self._set_super_seed()
+ self.rawserver.add_task(s)
+ except AttributeError:
+ pass
+
+ def _set_super_seed(self):
+ if not self.super_seeding_active:
+ self.super_seeding_active = True
+ self.errorfunc(' ** SUPER-SEED OPERATION ACTIVE **\n' +
+ ' please set Max uploads so each peer gets 6-8 kB/s')
+ def s(self = self):
+ self.downloader.set_super_seed()
+ self.choker.set_super_seed()
+ self.rawserver.add_task(s)
+ if self.finflag.isSet(): # mode started when already finished
+ def r(self = self):
+ self.rerequest.announce(3) # so after kicking everyone off, reannounce
+ self.rawserver.add_task(r)
+
+ def am_I_finished(self):
+ return self.finflag.isSet()
+
+ def get_transfer_stats(self):
+ return self.upmeasure.get_total(), self.downmeasure.get_total()
diff --git a/BitTornado/inifile.py b/BitTornado/inifile.py
new file mode 100644
index 000000000..091ce91b9
--- /dev/null
+++ b/BitTornado/inifile.py
@@ -0,0 +1,169 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+'''
+reads/writes a Windows-style INI file
+format:
+
+ aa = "bb"
+ cc = 11
+
+ [eee]
+ ff = "gg"
+
+decodes to:
+d = { '': {'aa':'bb','cc':'11'}, 'eee': {'ff':'gg'} }
+
+the encoder can also take this as input:
+
+d = { 'aa': 'bb', 'cc': 11, 'eee': {'ff':'gg'} }
+
+though it will only decode in the above format. Keywords must be strings.
+Values that are strings are written surrounded by quotes, and the decoding
+routine automatically strips any surrounding quotes.
+Booleans are written as integers. Anything else aside from string/int/float
+may have unpredictable results.
+'''
+
+from cStringIO import StringIO
+from traceback import print_exc
+from types import DictType, StringType
+try:
+ from types import BooleanType
+except ImportError:
+ BooleanType = None
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+def ini_write(f, d, comment=''):
+ try:
+ a = {'':{}}
+ for k,v in d.items():
+ assert type(k) == StringType
+ k = k.lower()
+ if type(v) == DictType:
+ if DEBUG:
+ print 'new section:' +k
+ if k:
+ assert not a.has_key(k)
+ a[k] = {}
+ aa = a[k]
+ for kk,vv in v:
+ assert type(kk) == StringType
+ kk = kk.lower()
+ assert not aa.has_key(kk)
+ if type(vv) == BooleanType:
+ vv = int(vv)
+ if type(vv) == StringType:
+ vv = '"'+vv+'"'
+ aa[kk] = str(vv)
+ if DEBUG:
+ print 'a['+k+']['+kk+'] = '+str(vv)
+ else:
+ aa = a['']
+ assert not aa.has_key(k)
+ if type(v) == BooleanType:
+ v = int(v)
+ if type(v) == StringType:
+ v = '"'+v+'"'
+ aa[k] = str(v)
+ if DEBUG:
+ print 'a[\'\']['+k+'] = '+str(v)
+ r = open(f,'w')
+ if comment:
+ for c in comment.split('\n'):
+ r.write('# '+c+'\n')
+ r.write('\n')
+ l = a.keys()
+ l.sort()
+ for k in l:
+ if k:
+ r.write('\n['+k+']\n')
+ aa = a[k]
+ ll = aa.keys()
+ ll.sort()
+ for kk in ll:
+ r.write(kk+' = '+aa[kk]+'\n')
+ success = True
+ except:
+ if DEBUG:
+ print_exc()
+ success = False
+ try:
+ r.close()
+ except:
+ pass
+ return success
+
+
+if DEBUG:
+ def errfunc(lineno, line, err):
+ print '('+str(lineno)+') '+err+': '+line
+else:
+ errfunc = lambda lineno, line, err: None
+
+def ini_read(f, errfunc = errfunc):
+ try:
+ r = open(f,'r')
+ ll = r.readlines()
+ d = {}
+ dd = {'':d}
+ for i in xrange(len(ll)):
+ l = ll[i]
+ l = l.strip()
+ if not l:
+ continue
+ if l[0] == '#':
+ continue
+ if l[0] == '[':
+ if l[-1] != ']':
+ errfunc(i,l,'syntax error')
+ continue
+ l1 = l[1:-1].strip().lower()
+ if not l1:
+ errfunc(i,l,'syntax error')
+ continue
+ if dd.has_key(l1):
+ errfunc(i,l,'duplicate section')
+ d = dd[l1]
+ continue
+ d = {}
+ dd[l1] = d
+ continue
+ try:
+ k,v = l.split('=',1)
+ except:
+ try:
+ k,v = l.split(':',1)
+ except:
+ errfunc(i,l,'syntax error')
+ continue
+ k = k.strip().lower()
+ v = v.strip()
+ if len(v) > 1 and ( (v[0] == '"' and v[-1] == '"') or
+ (v[0] == "'" and v[-1] == "'") ):
+ v = v[1:-1]
+ if not k:
+ errfunc(i,l,'syntax error')
+ continue
+ if d.has_key(k):
+ errfunc(i,l,'duplicate entry')
+ continue
+ d[k] = v
+ if DEBUG:
+ print dd
+ except:
+ if DEBUG:
+ print_exc()
+ dd = None
+ try:
+ r.close()
+ except:
+ pass
+ return dd
diff --git a/BitTornado/iprangeparse.py b/BitTornado/iprangeparse.py
new file mode 100644
index 000000000..f177f041a
--- /dev/null
+++ b/BitTornado/iprangeparse.py
@@ -0,0 +1,194 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+ bool = lambda x: not not x
+
+
+def to_long_ipv4(ip):
+ ip = ip.split('.')
+ if len(ip) != 4:
+ raise ValueError, "bad address"
+ b = 0L
+ for n in ip:
+ b *= 256
+ b += int(n)
+ return b
+
+
+def to_long_ipv6(ip):
+ if ip == '':
+ raise ValueError, "bad address"
+ if ip == '::': # boundary handling
+ ip = ''
+ elif ip[:2] == '::':
+ ip = ip[1:]
+ elif ip[0] == ':':
+ raise ValueError, "bad address"
+ elif ip[-2:] == '::':
+ ip = ip[:-1]
+ elif ip[-1] == ':':
+ raise ValueError, "bad address"
+
+ b = []
+ doublecolon = False
+ for n in ip.split(':'):
+ if n == '': # double-colon
+ if doublecolon:
+ raise ValueError, "bad address"
+ doublecolon = True
+ b.append(None)
+ continue
+ if n.find('.') >= 0: # IPv4
+ n = n.split('.')
+ if len(n) != 4:
+ raise ValueError, "bad address"
+ for i in n:
+ b.append(int(i))
+ continue
+ n = ('0'*(4-len(n))) + n
+ b.append(int(n[:2],16))
+ b.append(int(n[2:],16))
+ bb = 0L
+ for n in b:
+ if n is None:
+ for i in xrange(17-len(b)):
+ bb *= 256
+ continue
+ bb *= 256
+ bb += n
+ return bb
+
+ipv4addrmask = 65535L*256*256*256*256
+
+class IP_List:
+ def __init__(self):
+ self.ipv4list = [] # starts of ranges
+ self.ipv4dict = {} # start: end of ranges
+ self.ipv6list = [] # "
+ self.ipv6dict = {} # "
+
+ def __nonzero__(self):
+ return bool(self.ipv4list or self.ipv6list)
+
+
+ def append(self, ip_beg, ip_end = None):
+ if ip_end is None:
+ ip_end = ip_beg
+ else:
+ assert ip_beg <= ip_end
+ if ip_beg.find(':') < 0: # IPv4
+ ip_beg = to_long_ipv4(ip_beg)
+ ip_end = to_long_ipv4(ip_end)
+ l = self.ipv4list
+ d = self.ipv4dict
+ else:
+ ip_beg = to_long_ipv6(ip_beg)
+ ip_end = to_long_ipv6(ip_end)
+ bb = ip_beg % (256*256*256*256)
+ if bb == ipv4addrmask:
+ ip_beg -= bb
+ ip_end -= bb
+ l = self.ipv4list
+ d = self.ipv4dict
+ else:
+ l = self.ipv6list
+ d = self.ipv6dict
+
+ pos = bisect(l,ip_beg)-1
+ done = pos < 0
+ while not done:
+ p = pos
+ while p < len(l):
+ range_beg = l[p]
+ if range_beg > ip_end+1:
+ done = True
+ break
+ range_end = d[range_beg]
+ if range_end < ip_beg-1:
+ p += 1
+ if p == len(l):
+ done = True
+ break
+ continue
+ # if neither of the above conditions is true, the ranges overlap
+ ip_beg = min(ip_beg, range_beg)
+ ip_end = max(ip_end, range_end)
+ del l[p]
+ del d[range_beg]
+ break
+
+ insort(l,ip_beg)
+ d[ip_beg] = ip_end
+
+
+ def includes(self, ip):
+ if not (self.ipv4list or self.ipv6list):
+ return False
+ if ip.find(':') < 0: # IPv4
+ ip = to_long_ipv4(ip)
+ l = self.ipv4list
+ d = self.ipv4dict
+ else:
+ ip = to_long_ipv6(ip)
+ bb = ip % (256*256*256*256)
+ if bb == ipv4addrmask:
+ ip -= bb
+ l = self.ipv4list
+ d = self.ipv4dict
+ else:
+ l = self.ipv6list
+ d = self.ipv6dict
+ for ip_beg in l[bisect(l,ip)-1:]:
+ if ip == ip_beg:
+ return True
+ ip_end = d[ip_beg]
+ if ip > ip_beg and ip <= ip_end:
+ return True
+ return False
+
+
+ # reads a list from a file in the format 'whatever:whatever:ip-ip'
+ # (not IPv6 compatible at all)
+ def read_rangelist(self, file):
+ f = open(file, 'r')
+ while True:
+ line = f.readline()
+ if not line:
+ break
+ line = line.strip()
+ if not line or line[0] == '#':
+ continue
+ line = line.split(':')[-1]
+ try:
+ ip1,ip2 = line.split('-')
+ except:
+ ip1 = line
+ ip2 = line
+ try:
+ self.append(ip1.strip(),ip2.strip())
+ except:
+ print '*** WARNING *** could not parse IP range: '+line
+ f.close()
+
+def is_ipv4(ip):
+ return ip.find(':') < 0
+
+def is_valid_ip(ip):
+ try:
+ if is_ipv4(ip):
+ a = ip.split('.')
+ assert len(a) == 4
+ for i in a:
+ chr(int(i))
+ return True
+ to_long_ipv6(ip)
+ return True
+ except:
+ return False
diff --git a/BitTornado/launchmanycore.py b/BitTornado/launchmanycore.py
new file mode 100644
index 000000000..2d5513326
--- /dev/null
+++ b/BitTornado/launchmanycore.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python
+
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+ try:
+ import psyco
+ assert psyco.__version__ >= 0x010100f0
+ psyco.full()
+ except:
+ pass
+
+from download_bt1 import BT1Download
+from RawServer import RawServer, UPnP_ERROR
+from RateLimiter import RateLimiter
+from ServerPortHandler import MultiHandler
+from parsedir import parsedir
+from natpunch import UPnP_test
+from random import seed
+from socket import error as socketerror
+from threading import Event
+from sys import argv, exit
+import sys, os
+from clock import clock
+from __init__ import createPeerID, mapbase64, version
+from cStringIO import StringIO
+from traceback import print_exc
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+
+def fmttime(n):
+ try:
+ n = int(n) # n may be None or too large
+ assert n < 5184000 # 60 days
+ except:
+ return 'downloading'
+ m, s = divmod(n, 60)
+ h, m = divmod(m, 60)
+ return '%d:%02d:%02d' % (h, m, s)
+
+class SingleDownload:
+ def __init__(self, controller, hash, response, config, myid):
+ self.controller = controller
+ self.hash = hash
+ self.response = response
+ self.config = config
+
+ self.doneflag = Event()
+ self.waiting = True
+ self.checking = False
+ self.working = False
+ self.seed = False
+ self.closed = False
+
+ self.status_msg = ''
+ self.status_err = ['']
+ self.status_errtime = 0
+ self.status_done = 0.0
+
+ self.rawserver = controller.handler.newRawServer(hash, self.doneflag)
+
+ d = BT1Download(self.display, self.finished, self.error,
+ controller.exchandler, self.doneflag, config, response,
+ hash, myid, self.rawserver, controller.listen_port)
+ self.d = d
+
+ def start(self):
+ if not self.d.saveAs(self.saveAs):
+ self._shutdown()
+ return
+ self._hashcheckfunc = self.d.initFiles()
+ if not self._hashcheckfunc:
+ self._shutdown()
+ return
+ self.controller.hashchecksched(self.hash)
+
+
+ def saveAs(self, name, length, saveas, isdir):
+ return self.controller.saveAs(self.hash, name, saveas, isdir)
+
+ def hashcheck_start(self, donefunc):
+ if self.is_dead():
+ self._shutdown()
+ return
+ self.waiting = False
+ self.checking = True
+ self._hashcheckfunc(donefunc)
+
+ def hashcheck_callback(self):
+ self.checking = False
+ if self.is_dead():
+ self._shutdown()
+ return
+ if not self.d.startEngine(ratelimiter = self.controller.ratelimiter):
+ self._shutdown()
+ return
+ self.d.startRerequester()
+ self.statsfunc = self.d.startStats()
+ self.rawserver.start_listening(self.d.getPortHandler())
+ self.working = True
+
+ def is_dead(self):
+ return self.doneflag.isSet()
+
+ def _shutdown(self):
+ self.shutdown(False)
+
+ def shutdown(self, quiet=True):
+ if self.closed:
+ return
+ self.doneflag.set()
+ self.rawserver.shutdown()
+ if self.checking or self.working:
+ self.d.shutdown()
+ self.waiting = False
+ self.checking = False
+ self.working = False
+ self.closed = True
+ self.controller.was_stopped(self.hash)
+ if not quiet:
+ self.controller.died(self.hash)
+
+
+ def display(self, activity = None, fractionDone = None):
+ # really only used by StorageWrapper now
+ if activity:
+ self.status_msg = activity
+ if fractionDone is not None:
+ self.status_done = float(fractionDone)
+
+ def finished(self):
+ self.seed = True
+
+ def error(self, msg):
+ if self.doneflag.isSet():
+ self._shutdown()
+ self.status_err.append(msg)
+ self.status_errtime = clock()
+
+
+class LaunchMany:
+ def __init__(self, config, Output):
+ try:
+ self.config = config
+ self.Output = Output
+
+ self.torrent_dir = config['torrent_dir']
+ self.torrent_cache = {}
+ self.file_cache = {}
+ self.blocked_files = {}
+ self.scan_period = config['parse_dir_interval']
+ self.stats_period = config['display_interval']
+
+ self.torrent_list = []
+ self.downloads = {}
+ self.counter = 0
+ self.doneflag = Event()
+
+ self.hashcheck_queue = []
+ self.hashcheck_current = None
+
+ self.rawserver = RawServer(self.doneflag, config['timeout_check_interval'],
+ config['timeout'], ipv6_enable = config['ipv6_enabled'],
+ failfunc = self.failed, errorfunc = self.exchandler)
+ upnp_type = UPnP_test(config['upnp_nat_access'])
+ while True:
+ try:
+ self.listen_port = self.rawserver.find_and_bind(
+ config['minport'], config['maxport'], config['bind'],
+ ipv6_socket_style = config['ipv6_binds_v4'],
+ upnp = upnp_type, randomizer = config['random_port'])
+ break
+ except socketerror, e:
+ if upnp_type and e == UPnP_ERROR:
+ self.Output.message('WARNING: COULD NOT FORWARD VIA UPnP')
+ upnp_type = 0
+ continue
+ self.failed("Couldn't listen - " + str(e))
+ return
+
+ self.ratelimiter = RateLimiter(self.rawserver.add_task,
+ config['upload_unit_size'])
+ self.ratelimiter.set_upload_rate(config['max_upload_rate'])
+
+ self.handler = MultiHandler(self.rawserver, self.doneflag)
+ seed(createPeerID())
+ self.rawserver.add_task(self.scan, 0)
+ self.rawserver.add_task(self.stats, 0)
+
+ self.handler.listen_forever()
+
+ self.Output.message('shutting down')
+ self.hashcheck_queue = []
+ for hash in self.torrent_list:
+ self.Output.message('dropped "'+self.torrent_cache[hash]['path']+'"')
+ self.downloads[hash].shutdown()
+ self.rawserver.shutdown()
+
+ except:
+ data = StringIO()
+ print_exc(file = data)
+ Output.exception(data.getvalue())
+
+
+ def scan(self):
+ self.rawserver.add_task(self.scan, self.scan_period)
+
+ r = parsedir(self.torrent_dir, self.torrent_cache,
+ self.file_cache, self.blocked_files,
+ return_metainfo = True, errfunc = self.Output.message)
+
+ ( self.torrent_cache, self.file_cache, self.blocked_files,
+ added, removed ) = r
+
+ for hash, data in removed.items():
+ self.Output.message('dropped "'+data['path']+'"')
+ self.remove(hash)
+ for hash, data in added.items():
+ self.Output.message('added "'+data['path']+'"')
+ self.add(hash, data)
+
+ def stats(self):
+ self.rawserver.add_task(self.stats, self.stats_period)
+ data = []
+ for hash in self.torrent_list:
+ cache = self.torrent_cache[hash]
+ if self.config['display_path']:
+ name = cache['path']
+ else:
+ name = cache['name']
+ size = cache['length']
+ d = self.downloads[hash]
+ progress = '0.0%'
+ peers = 0
+ seeds = 0
+ seedsmsg = "S"
+ dist = 0.0
+ uprate = 0.0
+ dnrate = 0.0
+ upamt = 0
+ dnamt = 0
+ t = 0
+ if d.is_dead():
+ status = 'stopped'
+ elif d.waiting:
+ status = 'waiting for hash check'
+ elif d.checking:
+ status = d.status_msg
+ progress = '%.1f%%' % (d.status_done*100)
+ else:
+ stats = d.statsfunc()
+ s = stats['stats']
+ if d.seed:
+ status = 'seeding'
+ progress = '100.0%'
+ seeds = s.numOldSeeds
+ seedsmsg = "s"
+ dist = s.numCopies
+ else:
+ if s.numSeeds + s.numPeers:
+ t = stats['time']
+ if t == 0: # unlikely
+ t = 0.01
+ status = fmttime(t)
+ else:
+ t = -1
+ status = 'connecting to peers'
+ progress = '%.1f%%' % (int(stats['frac']*1000)/10.0)
+ seeds = s.numSeeds
+ dist = s.numCopies2
+ dnrate = stats['down']
+ peers = s.numPeers
+ uprate = stats['up']
+ upamt = s.upTotal
+ dnamt = s.downTotal
+
+ if d.is_dead() or d.status_errtime+300 > clock():
+ msg = d.status_err[-1]
+ else:
+ msg = ''
+
+ data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
+ uprate, dnrate, upamt, dnamt, size, t, msg ))
+ stop = self.Output.display(data)
+ if stop:
+ self.doneflag.set()
+
+ def remove(self, hash):
+ self.torrent_list.remove(hash)
+ self.downloads[hash].shutdown()
+ del self.downloads[hash]
+
+ def add(self, hash, data):
+ c = self.counter
+ self.counter += 1
+ x = ''
+ for i in xrange(3):
+ x = mapbase64[c & 0x3F]+x
+ c >>= 6
+ peer_id = createPeerID(x)
+ d = SingleDownload(self, hash, data['metainfo'], self.config, peer_id)
+ self.torrent_list.append(hash)
+ self.downloads[hash] = d
+ d.start()
+
+
+ def saveAs(self, hash, name, saveas, isdir):
+ x = self.torrent_cache[hash]
+ style = self.config['saveas_style']
+ if style == 1 or style == 3:
+ if saveas:
+ saveas = os.path.join(saveas,x['file'][:-1-len(x['type'])])
+ else:
+ saveas = x['path'][:-1-len(x['type'])]
+ if style == 3:
+ if not os.path.isdir(saveas):
+ try:
+ os.mkdir(saveas)
+ except:
+ raise OSError("couldn't create directory for "+x['path']
+ +" ("+saveas+")")
+ if not isdir:
+ saveas = os.path.join(saveas, name)
+ else:
+ if saveas:
+ saveas = os.path.join(saveas, name)
+ else:
+ saveas = os.path.join(os.path.split(x['path'])[0], name)
+
+ if isdir and not os.path.isdir(saveas):
+ try:
+ os.mkdir(saveas)
+ except:
+ raise OSError("couldn't create directory for "+x['path']
+ +" ("+saveas+")")
+ return saveas
+
+
+ def hashchecksched(self, hash = None):
+ if hash:
+ self.hashcheck_queue.append(hash)
+ if not self.hashcheck_current:
+ self._hashcheck_start()
+
+ def _hashcheck_start(self):
+ self.hashcheck_current = self.hashcheck_queue.pop(0)
+ self.downloads[self.hashcheck_current].hashcheck_start(self.hashcheck_callback)
+
+ def hashcheck_callback(self):
+ self.downloads[self.hashcheck_current].hashcheck_callback()
+ if self.hashcheck_queue:
+ self._hashcheck_start()
+ else:
+ self.hashcheck_current = None
+
+ def died(self, hash):
+ if self.torrent_cache.has_key(hash):
+ self.Output.message('DIED: "'+self.torrent_cache[hash]['path']+'"')
+
+ def was_stopped(self, hash):
+ try:
+ self.hashcheck_queue.remove(hash)
+ except:
+ pass
+ if self.hashcheck_current == hash:
+ self.hashcheck_current = None
+ if self.hashcheck_queue:
+ self._hashcheck_start()
+
+ def failed(self, s):
+ self.Output.message('FAILURE: '+s)
+
+ def exchandler(self, s):
+ self.Output.exception(s)
diff --git a/BitTornado/natpunch.py b/BitTornado/natpunch.py
new file mode 100644
index 000000000..4ae57f6e8
--- /dev/null
+++ b/BitTornado/natpunch.py
@@ -0,0 +1,254 @@
+# Written by John Hoffman
+# derived from NATPortMapping.py by Yejun Yang
+# and from example code by Myers Carpenter
+# see LICENSE.txt for license information
+
+import socket
+from traceback import print_exc
+from subnetparse import IP_List
+from clock import clock
+from __init__ import createPeerID
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+EXPIRE_CACHE = 30 # seconds
+ID = "BT-"+createPeerID()[-4:]
+
+try:
+ import pythoncom, win32com.client
+ _supported = 1
+except ImportError:
+ _supported = 0
+
+
+
+class _UPnP1: # derived from Myers Carpenter's code
+ # seems to use the machine's local UPnP
+ # system for its operation. Runs fairly fast
+
+ def __init__(self):
+ self.map = None
+ self.last_got_map = -10e10
+
+ def _get_map(self):
+ if self.last_got_map + EXPIRE_CACHE < clock():
+ try:
+ dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP")
+ self.map = dispatcher.StaticPortMappingCollection
+ self.last_got_map = clock()
+ except:
+ self.map = None
+ return self.map
+
+ def test(self):
+ try:
+ assert self._get_map() # make sure a map was found
+ success = True
+ except:
+ success = False
+ return success
+
+
+ def open(self, ip, p):
+ map = self._get_map()
+ try:
+ map.Add(p,'TCP',p,ip,True,ID)
+ if DEBUG:
+ print 'port opened: '+ip+':'+str(p)
+ success = True
+ except:
+ if DEBUG:
+ print "COULDN'T OPEN "+str(p)
+ print_exc()
+ success = False
+ return success
+
+
+ def close(self, p):
+ map = self._get_map()
+ try:
+ map.Remove(p,'TCP')
+ success = True
+ if DEBUG:
+ print 'port closed: '+str(p)
+ except:
+ if DEBUG:
+ print 'ERROR CLOSING '+str(p)
+ print_exc()
+ success = False
+ return success
+
+
+ def clean(self, retry = False):
+ if not _supported:
+ return
+ try:
+ map = self._get_map()
+ ports_in_use = []
+ for i in xrange(len(map)):
+ try:
+ mapping = map[i]
+ port = mapping.ExternalPort
+ prot = str(mapping.Protocol).lower()
+ desc = str(mapping.Description).lower()
+ except:
+ port = None
+ if port and prot == 'tcp' and desc[:3] == 'bt-':
+ ports_in_use.append(port)
+ success = True
+ for port in ports_in_use:
+ try:
+ map.Remove(port,'TCP')
+ except:
+ success = False
+ if not success and not retry:
+ self.clean(retry = True)
+ except:
+ pass
+
+
+class _UPnP2: # derived from Yejun Yang's code
+ # apparently does a direct search for UPnP hardware
+ # may work in some cases where _UPnP1 won't, but is slow
+ # still need to implement "clean" method
+
+ def __init__(self):
+ self.services = None
+ self.last_got_services = -10e10
+
+ def _get_services(self):
+ if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
+ self.services = []
+ try:
+ f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder")
+ for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1",
+ "urn:schemas-upnp-org:service:WANPPPConnection:1" ):
+ try:
+ conns = f.FindByType(t,0)
+ for c in xrange(len(conns)):
+ try:
+ svcs = conns[c].Services
+ for s in xrange(len(svcs)):
+ try:
+ self.services.append(svcs[s])
+ except:
+ pass
+ except:
+ pass
+ except:
+ pass
+ except:
+ pass
+ self.last_got_services = clock()
+ return self.services
+
+ def test(self):
+ try:
+ assert self._get_services() # make sure some services can be found
+ success = True
+ except:
+ success = False
+ return success
+
+
+ def open(self, ip, p):
+ svcs = self._get_services()
+ success = False
+ for s in svcs:
+ try:
+ s.InvokeAction('AddPortMapping',['',p,'TCP',p,ip,True,ID,0],'')
+ success = True
+ except:
+ pass
+ if DEBUG and not success:
+ print "COULDN'T OPEN "+str(p)
+ print_exc()
+ return success
+
+
+ def close(self, p):
+ svcs = self._get_services()
+ success = False
+ for s in svcs:
+ try:
+ s.InvokeAction('DeletePortMapping', ['',p,'TCP'], '')
+ success = True
+ except:
+ pass
+ if DEBUG and not success:
+ print "COULDN'T OPEN "+str(p)
+ print_exc()
+ return success
+
+
+class _UPnP: # master holding class
+ def __init__(self):
+ self.upnp1 = _UPnP1()
+ self.upnp2 = _UPnP2()
+ self.upnplist = (None, self.upnp1, self.upnp2)
+ self.upnp = None
+ self.local_ip = None
+ self.last_got_ip = -10e10
+
+ def get_ip(self):
+ if self.last_got_ip + EXPIRE_CACHE < clock():
+ local_ips = IP_List()
+ local_ips.set_intranet_addresses()
+ try:
+ for info in socket.getaddrinfo(socket.gethostname(),0,socket.AF_INET):
+ # exception if socket library isn't recent
+ self.local_ip = info[4][0]
+ if local_ips.includes(self.local_ip):
+ self.last_got_ip = clock()
+ if DEBUG:
+ print 'Local IP found: '+self.local_ip
+ break
+ else:
+ raise ValueError('couldn\'t find intranet IP')
+ except:
+ self.local_ip = None
+ if DEBUG:
+ print 'Error finding local IP'
+ print_exc()
+ return self.local_ip
+
+ def test(self, upnp_type):
+ if DEBUG:
+ print 'testing UPnP type '+str(upnp_type)
+ if not upnp_type or not _supported or self.get_ip() is None:
+ if DEBUG:
+ print 'not supported'
+ return 0
+ pythoncom.CoInitialize() # leave initialized
+ self.upnp = self.upnplist[upnp_type] # cache this
+ if self.upnp.test():
+ if DEBUG:
+ print 'ok'
+ return upnp_type
+ if DEBUG:
+ print 'tested bad'
+ return 0
+
+ def open(self, p):
+ assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+ return self.upnp.open(self.get_ip(), p)
+
+ def close(self, p):
+ assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+ return self.upnp.close(p)
+
+ def clean(self):
+ return self.upnp1.clean()
+
+_upnp_ = _UPnP()
+
+UPnP_test = _upnp_.test
+UPnP_open_port = _upnp_.open
+UPnP_close_port = _upnp_.close
+UPnP_reset = _upnp_.clean
+
diff --git a/BitTornado/parseargs.py b/BitTornado/parseargs.py
new file mode 100644
index 000000000..7ec9656d3
--- /dev/null
+++ b/BitTornado/parseargs.py
@@ -0,0 +1,145 @@
+# Written by Bill Bumgarner and Bram Cohen
+# see LICENSE.txt for license information
+
+from types import *
+from cStringIO import StringIO
+
+
+def splitLine(line, COLS=80, indent=10):
+ indent = " " * indent
+ width = COLS - (len(indent) + 1)
+ if indent and width < 15:
+ width = COLS - 2
+ indent = " "
+ s = StringIO()
+ i = 0
+ for word in line.split():
+ if i == 0:
+ s.write(indent+word)
+ i = len(word)
+ continue
+ if i + len(word) >= width:
+ s.write('\n'+indent+word)
+ i = len(word)
+ continue
+ s.write(' '+word)
+ i += len(word) + 1
+ return s.getvalue()
+
+def formatDefinitions(options, COLS, presets = {}):
+ s = StringIO()
+ for (longname, default, doc) in options:
+ s.write('--' + longname + ' \n')
+ default = presets.get(longname, default)
+ if type(default) in (IntType, LongType):
+ try:
+ default = int(default)
+ except:
+ pass
+ if default is not None:
+ doc += ' (defaults to ' + repr(default) + ')'
+ s.write(splitLine(doc,COLS,10))
+ s.write('\n\n')
+ return s.getvalue()
+
+
+def usage(str):
+ raise ValueError(str)
+
+
+def defaultargs(options):
+ l = {}
+ for (longname, default, doc) in options:
+ if default is not None:
+ l[longname] = default
+ return l
+
+
+def parseargs(argv, options, minargs = None, maxargs = None, presets = {}):
+ config = {}
+ longkeyed = {}
+ for option in options:
+ longname, default, doc = option
+ longkeyed[longname] = option
+ config[longname] = default
+ for longname in presets.keys(): # presets after defaults but before arguments
+ config[longname] = presets[longname]
+ options = []
+ args = []
+ pos = 0
+ while pos < len(argv):
+ if argv[pos][:2] != '--':
+ args.append(argv[pos])
+ pos += 1
+ else:
+ if pos == len(argv) - 1:
+ usage('parameter passed in at end with no value')
+
+ key, value = argv[pos][2:], argv[pos+1]
+ pos += 2
+
+ if not longkeyed.has_key(key):
+ usage('unknown key --' + key)
+
+ longname, default, doc = longkeyed[key]
+
+ try:
+ t = type(config[longname])
+ if t is NoneType or t is StringType:
+ config[longname] = value
+ elif t in (IntType, LongType):
+ config[longname] = long(value)
+ elif t is FloatType:
+ config[longname] = float(value)
+ else:
+ assert 0
+ except ValueError, e:
+ usage('wrong format of --%s - %s' % (key, str(e)))
+
+ for key, value in config.items():
+ if value is None:
+ usage("Option --%s is required." % key)
+
+ if minargs is not None and len(args) < minargs:
+ usage("Must supply at least %d args." % minargs)
+
+ if maxargs is not None and len(args) > maxargs:
+ usage("Too many args - %d max." % maxargs)
+
+ return (config, args)
+
+def test_parseargs():
+ assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f'])
+ assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, [])
+ assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, [])
+ try:
+ parseargs([], [('a', 'x', '')])
+ except ValueError:
+ pass
+ try:
+ parseargs(['--a', 'x'], [])
+ except ValueError:
+ pass
+ try:
+ parseargs(['--a'], [('a', 'x', '')])
+ except ValueError:
+ pass
+ try:
+ parseargs([], [], 1, 2)
+ except ValueError:
+ pass
+ assert parseargs(['x'], [], 1, 2) == ({}, ['x'])
+ assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y'])
+ try:
+ parseargs(['x', 'y', 'z'], [], 1, 2)
+ except ValueError:
+ pass
+ try:
+ parseargs(['--a', '2.0'], [('a', 3, '')])
+ except ValueError:
+ pass
+ try:
+ parseargs(['--a', 'z'], [('a', 2.1, '')])
+ except ValueError:
+ pass
+
diff --git a/BitTornado/parsedir.py b/BitTornado/parsedir.py
new file mode 100644
index 000000000..74bec1078
--- /dev/null
+++ b/BitTornado/parsedir.py
@@ -0,0 +1,150 @@
+# Written by John Hoffman and Uoti Urpala
+# see LICENSE.txt for license information
+from bencode import bencode, bdecode
+from BT1.btformats import check_info
+from os.path import exists, isfile
+from sha import sha
+import sys, os
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+NOISY = False
+
+def _errfunc(x):
+ print ":: "+x
+
+def parsedir(directory, parsed, files, blocked,
+ exts = ['.torrent'], return_metainfo = False, errfunc = _errfunc):
+ if NOISY:
+ errfunc('checking dir')
+ dirs_to_check = [directory]
+ new_files = {}
+ new_blocked = {}
+ torrent_type = {}
+ while dirs_to_check: # first, recurse directories and gather torrents
+ directory = dirs_to_check.pop()
+ newtorrents = False
+ for f in os.listdir(directory):
+ newtorrent = None
+ for ext in exts:
+ if f.endswith(ext):
+ newtorrent = ext[1:]
+ break
+ if newtorrent:
+ newtorrents = True
+ p = os.path.join(directory, f)
+ new_files[p] = [(os.path.getmtime(p), os.path.getsize(p)), 0]
+ torrent_type[p] = newtorrent
+ if not newtorrents:
+ for f in os.listdir(directory):
+ p = os.path.join(directory, f)
+ if os.path.isdir(p):
+ dirs_to_check.append(p)
+
+ new_parsed = {}
+ to_add = []
+ added = {}
+ removed = {}
+ # files[path] = [(modification_time, size), hash], hash is 0 if the file
+ # has not been successfully parsed
+ for p,v in new_files.items(): # re-add old items and check for changes
+ oldval = files.get(p)
+ if not oldval: # new file
+ to_add.append(p)
+ continue
+ h = oldval[1]
+ if oldval[0] == v[0]: # file is unchanged from last parse
+ if h:
+ if blocked.has_key(p): # parseable + blocked means duplicate
+ to_add.append(p) # other duplicate may have gone away
+ else:
+ new_parsed[h] = parsed[h]
+ new_files[p] = oldval
+ else:
+ new_blocked[p] = 1 # same broken unparseable file
+ continue
+ if parsed.has_key(h) and not blocked.has_key(p):
+ if NOISY:
+ errfunc('removing '+p+' (will re-add)')
+ removed[h] = parsed[h]
+ to_add.append(p)
+
+ to_add.sort()
+ for p in to_add: # then, parse new and changed torrents
+ new_file = new_files[p]
+ v,h = new_file
+ if new_parsed.has_key(h): # duplicate
+ if not blocked.has_key(p) or files[p][0] != v:
+ errfunc('**warning** '+
+ p +' is a duplicate torrent for '+new_parsed[h]['path'])
+ new_blocked[p] = 1
+ continue
+
+ if NOISY:
+ errfunc('adding '+p)
+ try:
+ ff = open(p, 'rb')
+ d = bdecode(ff.read())
+ check_info(d['info'])
+ h = sha(bencode(d['info'])).digest()
+ new_file[1] = h
+ if new_parsed.has_key(h):
+ errfunc('**warning** '+
+ p +' is a duplicate torrent for '+new_parsed[h]['path'])
+ new_blocked[p] = 1
+ continue
+
+ a = {}
+ a['path'] = p
+ f = os.path.basename(p)
+ a['file'] = f
+ a['type'] = torrent_type[p]
+ i = d['info']
+ l = 0
+ nf = 0
+ if i.has_key('length'):
+ l = i.get('length',0)
+ nf = 1
+ elif i.has_key('files'):
+ for li in i['files']:
+ nf += 1
+ if li.has_key('length'):
+ l += li['length']
+ a['numfiles'] = nf
+ a['length'] = l
+ a['name'] = i.get('name', f)
+ def setkey(k, d = d, a = a):
+ if d.has_key(k):
+ a[k] = d[k]
+ setkey('failure reason')
+ setkey('warning message')
+ setkey('announce-list')
+ if return_metainfo:
+ a['metainfo'] = d
+ except:
+ errfunc('**warning** '+p+' has errors')
+ new_blocked[p] = 1
+ continue
+ try:
+ ff.close()
+ except:
+ pass
+ if NOISY:
+ errfunc('... successful')
+ new_parsed[h] = a
+ added[h] = a
+
+ for p,v in files.items(): # and finally, mark removed torrents
+ if not new_files.has_key(p) and not blocked.has_key(p):
+ if NOISY:
+ errfunc('removing '+p)
+ removed[v[1]] = parsed[v[1]]
+
+ if NOISY:
+ errfunc('done checking')
+ return (new_parsed, new_files, new_blocked, added, removed)
+
diff --git a/BitTornado/piecebuffer.py b/BitTornado/piecebuffer.py
new file mode 100644
index 000000000..1c4e6683c
--- /dev/null
+++ b/BitTornado/piecebuffer.py
@@ -0,0 +1,86 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from array import array
+from threading import Lock
+# import inspect
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+DEBUG = False
+
+class SingleBuffer:
+ def __init__(self, pool):
+ self.pool = pool
+ self.buf = array('c')
+
+ def init(self):
+ if DEBUG:
+ print self.count
+ '''
+ for x in xrange(6,1,-1):
+ try:
+ f = inspect.currentframe(x).f_code
+ print (f.co_filename,f.co_firstlineno,f.co_name)
+ del f
+ except:
+ pass
+ print ''
+ '''
+ self.length = 0
+
+ def append(self, s):
+ l = self.length+len(s)
+ self.buf[self.length:l] = array('c',s)
+ self.length = l
+
+ def __len__(self):
+ return self.length
+
+ def __getslice__(self, a, b):
+ if b > self.length:
+ b = self.length
+ if b < 0:
+ b += self.length
+ if a == 0 and b == self.length and len(self.buf) == b:
+ return self.buf # optimization
+ return self.buf[a:b]
+
+ def getarray(self):
+ return self.buf[:self.length]
+
+ def release(self):
+ if DEBUG:
+ print -self.count
+ self.pool.release(self)
+
+
+class BufferPool:
+ def __init__(self):
+ self.pool = []
+ self.lock = Lock()
+ if DEBUG:
+ self.count = 0
+
+ def new(self):
+ self.lock.acquire()
+ if self.pool:
+ x = self.pool.pop()
+ else:
+ x = SingleBuffer(self)
+ if DEBUG:
+ self.count += 1
+ x.count = self.count
+ x.init()
+ self.lock.release()
+ return x
+
+ def release(self, x):
+ self.pool.append(x)
+
+
+_pool = BufferPool()
+PieceBuffer = _pool.new
diff --git a/BitTornado/selectpoll.py b/BitTornado/selectpoll.py
new file mode 100644
index 000000000..4703c4fc2
--- /dev/null
+++ b/BitTornado/selectpoll.py
@@ -0,0 +1,109 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from select import select, error
+from time import sleep
+from types import IntType
+from bisect import bisect
+POLLIN = 1
+POLLOUT = 2
+POLLERR = 8
+POLLHUP = 16
+
+class poll:
+ def __init__(self):
+ self.rlist = []
+ self.wlist = []
+
+ def register(self, f, t):
+ if type(f) != IntType:
+ f = f.fileno()
+ if (t & POLLIN):
+ insert(self.rlist, f)
+ else:
+ remove(self.rlist, f)
+ if (t & POLLOUT):
+ insert(self.wlist, f)
+ else:
+ remove(self.wlist, f)
+
+ def unregister(self, f):
+ if type(f) != IntType:
+ f = f.fileno()
+ remove(self.rlist, f)
+ remove(self.wlist, f)
+
+ def poll(self, timeout = None):
+ if self.rlist or self.wlist:
+ try:
+ r, w, e = select(self.rlist, self.wlist, [], timeout)
+ except ValueError:
+ return None
+ else:
+            if timeout is not None: sleep(timeout)
+ return []
+ result = []
+ for s in r:
+ result.append((s, POLLIN))
+ for s in w:
+ result.append((s, POLLOUT))
+ return result
+
+def remove(list, item):
+ i = bisect(list, item)
+ if i > 0 and list[i-1] == item:
+ del list[i-1]
+
+def insert(list, item):
+ i = bisect(list, item)
+ if i == 0 or list[i-1] != item:
+ list.insert(i, item)
+
+def test_remove():
+ x = [2, 4, 6]
+ remove(x, 2)
+ assert x == [4, 6]
+ x = [2, 4, 6]
+ remove(x, 4)
+ assert x == [2, 6]
+ x = [2, 4, 6]
+ remove(x, 6)
+ assert x == [2, 4]
+ x = [2, 4, 6]
+ remove(x, 5)
+ assert x == [2, 4, 6]
+ x = [2, 4, 6]
+ remove(x, 1)
+ assert x == [2, 4, 6]
+ x = [2, 4, 6]
+ remove(x, 7)
+ assert x == [2, 4, 6]
+ x = [2, 4, 6]
+ remove(x, 5)
+ assert x == [2, 4, 6]
+ x = []
+ remove(x, 3)
+ assert x == []
+
+def test_insert():
+ x = [2, 4]
+ insert(x, 1)
+ assert x == [1, 2, 4]
+ x = [2, 4]
+ insert(x, 3)
+ assert x == [2, 3, 4]
+ x = [2, 4]
+ insert(x, 5)
+ assert x == [2, 4, 5]
+ x = [2, 4]
+ insert(x, 2)
+ assert x == [2, 4]
+ x = [2, 4]
+ insert(x, 4)
+ assert x == [2, 4]
+ x = [2, 3, 4]
+ insert(x, 3)
+ assert x == [2, 3, 4]
+ x = []
+ insert(x, 3)
+ assert x == [3]
diff --git a/BitTornado/subnetparse.py b/BitTornado/subnetparse.py
new file mode 100644
index 000000000..1b7378765
--- /dev/null
+++ b/BitTornado/subnetparse.py
@@ -0,0 +1,218 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+ bool = lambda x: not not x
+
+hexbinmap = {
+ '0': '0000',
+ '1': '0001',
+ '2': '0010',
+ '3': '0011',
+ '4': '0100',
+ '5': '0101',
+ '6': '0110',
+ '7': '0111',
+ '8': '1000',
+ '9': '1001',
+ 'a': '1010',
+ 'b': '1011',
+ 'c': '1100',
+ 'd': '1101',
+ 'e': '1110',
+ 'f': '1111',
+ 'x': '0000',
+}
+
+chrbinmap = {}
+for n in xrange(256):
+ b = []
+ nn = n
+ for i in xrange(8):
+ if nn & 0x80:
+ b.append('1')
+ else:
+ b.append('0')
+ nn <<= 1
+ chrbinmap[n] = ''.join(b)
+
+
+def to_bitfield_ipv4(ip):
+ ip = ip.split('.')
+ if len(ip) != 4:
+ raise ValueError, "bad address"
+ b = []
+ for i in ip:
+ b.append(chrbinmap[int(i)])
+ return ''.join(b)
+
+def to_bitfield_ipv6(ip):
+ b = ''
+ doublecolon = False
+
+ if ip == '':
+ raise ValueError, "bad address"
+ if ip == '::': # boundary handling
+ ip = ''
+ elif ip[:2] == '::':
+ ip = ip[1:]
+ elif ip[0] == ':':
+ raise ValueError, "bad address"
+ elif ip[-2:] == '::':
+ ip = ip[:-1]
+ elif ip[-1] == ':':
+ raise ValueError, "bad address"
+ for n in ip.split(':'):
+ if n == '': # double-colon
+ if doublecolon:
+ raise ValueError, "bad address"
+ doublecolon = True
+ b += ':'
+ continue
+ if n.find('.') >= 0: # IPv4
+ n = to_bitfield_ipv4(n)
+ b += n + '0'*(32-len(n))
+ continue
+ n = ('x'*(4-len(n))) + n
+ for i in n:
+ b += hexbinmap[i]
+ if doublecolon:
+ pos = b.find(':')
+ b = b[:pos]+('0'*(129-len(b)))+b[pos+1:]
+ if len(b) != 128: # always check size
+ raise ValueError, "bad address"
+ return b
+
+ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96]
+
+class IP_List:
+ def __init__(self):
+ self.ipv4list = []
+ self.ipv6list = []
+
+ def __nonzero__(self):
+ return bool(self.ipv4list or self.ipv6list)
+
+
+ def append(self, ip, depth = 256):
+ if ip.find(':') < 0: # IPv4
+ insort(self.ipv4list,to_bitfield_ipv4(ip)[:depth])
+ else:
+ b = to_bitfield_ipv6(ip)
+ if b.startswith(ipv4addrmask):
+ insort(self.ipv4list,b[96:][:depth-96])
+ else:
+ insort(self.ipv6list,b[:depth])
+
+
+ def includes(self, ip):
+ if not (self.ipv4list or self.ipv6list):
+ return False
+ if ip.find(':') < 0: # IPv4
+ b = to_bitfield_ipv4(ip)
+ else:
+ b = to_bitfield_ipv6(ip)
+ if b.startswith(ipv4addrmask):
+ b = b[96:]
+ if len(b) > 32:
+ l = self.ipv6list
+ else:
+ l = self.ipv4list
+ for map in l[bisect(l,b)-1:]:
+ if b.startswith(map):
+ return True
+ if map > b:
+ return False
+ return False
+
+
+ def read_fieldlist(self, file): # reads a list from a file in the format 'ip/len '
+ f = open(file, 'r')
+ while True:
+ line = f.readline()
+ if not line:
+ break
+ line = line.strip().expandtabs()
+ if not line or line[0] == '#':
+ continue
+ try:
+ line, garbage = line.split(' ',1)
+ except:
+ pass
+ try:
+ line, garbage = line.split('#',1)
+ except:
+ pass
+ try:
+ ip, depth = line.split('/')
+ except:
+ ip = line
+ depth = None
+ try:
+ if depth is not None:
+ depth = int(depth)
+ self.append(ip,depth)
+ except:
+ print '*** WARNING *** could not parse IP range: '+line
+ f.close()
+
+
+ def set_intranet_addresses(self):
+ self.append('127.0.0.1',8)
+ self.append('10.0.0.0',8)
+ self.append('172.16.0.0',12)
+ self.append('192.168.0.0',16)
+ self.append('169.254.0.0',16)
+ self.append('::1')
+ self.append('fe80::',16)
+ self.append('fec0::',16)
+
+ def set_ipv4_addresses(self):
+ self.append('::ffff:0:0',96)
+
+def ipv6_to_ipv4(ip):
+ ip = to_bitfield_ipv6(ip)
+ if not ip.startswith(ipv4addrmask):
+ raise ValueError, "not convertible to IPv4"
+ ip = ip[-32:]
+ x = ''
+ for i in range(4):
+ x += str(int(ip[:8],2))
+ if i < 3:
+ x += '.'
+ ip = ip[8:]
+ return x
+
+def to_ipv4(ip):
+ if is_ipv4(ip):
+ _valid_ipv4(ip)
+ return ip
+ return ipv6_to_ipv4(ip)
+
+def is_ipv4(ip):
+ return ip.find(':') < 0
+
+def _valid_ipv4(ip):
+ ip = ip.split('.')
+ if len(ip) != 4:
+ raise ValueError
+ for i in ip:
+ chr(int(i))
+
+def is_valid_ip(ip):
+ try:
+ if not ip:
+ return False
+ if is_ipv4(ip):
+ _valid_ipv4(ip)
+ return True
+ to_bitfield_ipv6(ip)
+ return True
+ except:
+ return False
diff --git a/BitTornado/torrentlistparse.py b/BitTornado/torrentlistparse.py
new file mode 100644
index 000000000..068209b8a
--- /dev/null
+++ b/BitTornado/torrentlistparse.py
@@ -0,0 +1,38 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from binascii import unhexlify
+
+try:
+ True
+except:
+ True = 1
+ False = 0
+
+
+# parses a list of torrent hashes, in the format of one hash per line in hex format
+
+def parsetorrentlist(filename, parsed):
+ new_parsed = {}
+ added = {}
+ removed = parsed
+ f = open(filename, 'r')
+ while True:
+ l = f.readline()
+ if not l:
+ break
+ l = l.strip()
+ try:
+ if len(l) != 40:
+ raise ValueError, 'bad line'
+ h = unhexlify(l)
+ except:
+            print '*** WARNING *** could not parse line in torrent list: '+l; continue
+ if parsed.has_key(h):
+ del removed[h]
+ else:
+ added[h] = True
+ new_parsed[h] = True
+ f.close()
+ return (new_parsed, added, removed)
+
diff --git a/BitTornado/zurllib.py b/BitTornado/zurllib.py
new file mode 100644
index 000000000..f0d5f2821
--- /dev/null
+++ b/BitTornado/zurllib.py
@@ -0,0 +1,100 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from httplib import HTTPConnection, HTTPSConnection, HTTPException
+from urlparse import urlparse
+from bencode import bdecode
+import socket
+from gzip import GzipFile
+from StringIO import StringIO
+from urllib import quote, unquote
+from __init__ import product_name, version_short
+
+VERSION = product_name+'/'+version_short
+MAX_REDIRECTS = 10
+
+
+class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout
+ def connect(self):
+ HTTPConnection.connect(self)
+ try:
+ self.sock.settimeout(30)
+ except:
+ pass
+
+class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout
+ def connect(self):
+ HTTPSConnection.connect(self)
+ try:
+ self.sock.settimeout(30)
+ except:
+ pass
+
+class urlopen:
+ def __init__(self, url):
+ self.tries = 0
+        self.error_return = None
+        self._open(url.strip())
+
+ def _open(self, url):
+ self.tries += 1
+ if self.tries > MAX_REDIRECTS:
+ raise IOError, ('http error', 500,
+ "Internal Server Error: Redirect Recursion")
+ (scheme, netloc, path, pars, query, fragment) = urlparse(url)
+ if scheme != 'http' and scheme != 'https':
+ raise IOError, ('url error', 'unknown url type', scheme, url)
+ url = path
+ if pars:
+ url += ';'+pars
+ if query:
+ url += '?'+query
+# if fragment:
+ try:
+ if scheme == 'http':
+ self.connection = btHTTPcon(netloc)
+ else:
+ self.connection = btHTTPScon(netloc)
+ self.connection.request('GET', url, None,
+ { 'User-Agent': VERSION,
+ 'Accept-Encoding': 'gzip' } )
+ self.response = self.connection.getresponse()
+ except HTTPException, e:
+ raise IOError, ('http error', str(e))
+ status = self.response.status
+ if status in (301,302):
+ try:
+ self.connection.close()
+ except:
+ pass
+ self._open(self.response.getheader('Location'))
+ return
+ if status != 200:
+ try:
+ data = self._read()
+ d = bdecode(data)
+ if d.has_key('failure reason'):
+ self.error_return = data
+ return
+ except:
+ pass
+ raise IOError, ('http error', status, self.response.reason)
+
+ def read(self):
+ if self.error_return:
+ return self.error_return
+ return self._read()
+
+ def _read(self):
+ data = self.response.read()
+ if self.response.getheader('Content-Encoding','').find('gzip') >= 0:
+ try:
+ compressed = StringIO(data)
+ f = GzipFile(fileobj = compressed)
+ data = f.read()
+ except:
+ raise IOError, ('http error', 'got corrupt response')
+ return data
+
+ def close(self):
+ self.connection.close()
diff --git a/LICENSE b/LICENSE
index 9cc00e905..cf24eae57 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2,7 +2,7 @@
*
* Armory -- Bitcoin Wallet Software
*
-* Copyright (C) 2011-2013, Armory Technologies, Inc.
+* Copyright (C) 2011-2014, Armory Technologies, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
diff --git a/LICENSE.py b/LICENSE.py
index 913335050..d01296b88 100644
--- a/LICENSE.py
+++ b/LICENSE.py
@@ -4,7 +4,7 @@ def licenseText():
* *
* Armory -- Advanced Bitcoin Wallet Software *
* *
-* Copyright (C) 2011-2013, Armory Technologies, Inc. *
+* Copyright (C) 2011-2014, Armory Technologies, Inc. *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU Affero General Public License as *
@@ -33,6 +33,7 @@ def licenseText():
qtreactor4.py
qrcodenative.py
jsonrpc/*
+ bittornado/*
Everything in the cryptopp directory is considered public domain according
to http://www.cryptopp.com/ and included with the source distribution
diff --git a/Makefile b/Makefile
index e13aba02e..6826b2c96 100644
--- a/Makefile
+++ b/Makefile
@@ -1,30 +1,38 @@
# All the actual Makefiles are deeper in the directory tree.
# I am just calling them, here.
-DESTDIR=/usr
+PREFIX=/usr
+DESTDIR=
all :
- $(MAKE) -C cppForSwig swig
+ $(MAKE) -C cppForSwig
clean :
$(MAKE) -C cppForSwig clean
- rm -rf osxbuild/Armory.app
- rm -rf osxbuild/env
+ rm -f osxbuild/build-app.log.txt
+ rm -rf osxbuild/workspace/
install : all
- mkdir -p $(DESTDIR)/share/armory/img
- mkdir -p $(DESTDIR)/lib/armory/extras
- mkdir -p $(DESTDIR)/lib/armory/jsonrpc
- mkdir -p $(DESTDIR)/lib/armory/dialogs
- cp *.py *.so README $(DESTDIR)/lib/armory/
- cp img/* $(DESTDIR)/share/armory/img
- cp extras/*.py $(DESTDIR)/lib/armory/extras
- cp jsonrpc/*.py $(DESTDIR)/lib/armory/jsonrpc
- cp dialogs/*.py $(DESTDIR)/lib/armory/dialogs
- mkdir -p $(DESTDIR)/share/applications
- sed "s:python /usr:python $(DESTDIR):g" < dpkgfiles/armory.desktop > $(DESTDIR)/share/applications/armory.desktop
- sed "s:python /usr:python $(DESTDIR):g" < dpkgfiles/armoryoffline.desktop > $(DESTDIR)/share/applications/armoryoffline.desktop
- sed "s:python /usr:python $(DESTDIR):g" < dpkgfiles/armorytestnet.desktop > $(DESTDIR)/share/applications/armorytestnet.desktop
+ mkdir -p $(DESTDIR)$(PREFIX)/share/armory/img
+ mkdir -p $(DESTDIR)$(PREFIX)/lib/armory/extras
+ mkdir -p $(DESTDIR)$(PREFIX)/lib/armory/jsonrpc
+ mkdir -p $(DESTDIR)$(PREFIX)/lib/armory/ui
+ mkdir -p $(DESTDIR)$(PREFIX)/lib/armory/BitTornado/BT1
+ mkdir -p $(DESTDIR)$(PREFIX)/lib/armory/urllib3
+ cp *.py *.so README $(DESTDIR)$(PREFIX)/lib/armory/
+ rsync -rupE armoryengine $(DESTDIR)$(PREFIX)/lib/armory/
+ rsync -rupE img $(DESTDIR)$(PREFIX)/share/armory/
+ cp extras/*.py $(DESTDIR)$(PREFIX)/lib/armory/extras
+ cp jsonrpc/*.py $(DESTDIR)$(PREFIX)/lib/armory/jsonrpc
+ cp ui/*.py $(DESTDIR)$(PREFIX)/lib/armory/ui
+ cp -r urllib3/* $(DESTDIR)$(PREFIX)/lib/armory/urllib3
+ mkdir -p $(DESTDIR)$(PREFIX)/share/applications
+ cp BitTornado/*.py $(DESTDIR)$(PREFIX)/lib/armory/BitTornado
+ cp BitTornado/BT1/*.py $(DESTDIR)$(PREFIX)/lib/armory/BitTornado/BT1
+ cp default_bootstrap.torrent $(DESTDIR)$(PREFIX)/lib/armory
+ sed "s:python /usr:python $(PREFIX):g" < dpkgfiles/armory.desktop > $(DESTDIR)$(PREFIX)/share/applications/armory.desktop
+ sed "s:python /usr:python $(PREFIX):g" < dpkgfiles/armoryoffline.desktop > $(DESTDIR)$(PREFIX)/share/applications/armoryoffline.desktop
+ sed "s:python /usr:python $(PREFIX):g" < dpkgfiles/armorytestnet.desktop > $(DESTDIR)$(PREFIX)/share/applications/armorytestnet.desktop
osx :
diff --git a/README b/README
index 0a6ffaba5..bf2764ee9 100644
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
################################################################################
# #
-# Copyright (C) 2011-2013, Alan C. Reiner #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
diff --git a/SDM.py b/SDM.py
new file mode 100644
index 000000000..9371ce88d
--- /dev/null
+++ b/SDM.py
@@ -0,0 +1,912 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+import inspect
+import os.path
+import socket
+import stat
+import time
+from threading import Event
+from jsonrpc import ServiceProxy
+from CppBlockUtils import SecureBinaryData, CryptoECDSA
+from armoryengine.ArmoryUtils import BITCOIN_PORT, LOGERROR, hex_to_binary, \
+ ARMORY_INFO_SIGN_PUBLICKEY, LOGINFO, BTC_HOME_DIR, LOGDEBUG, OS_WINDOWS, \
+ SystemSpecs, subprocess_check_output, LOGEXCEPT, FileExistsError, OS_VARIANT, \
+   BITCOIN_RPC_PORT, binary_to_base58, isASCII, USE_TESTNET, MEGABYTE, GIGABYTE, \
+ launchProcess, killProcessTree, killProcess, LOGWARN, RightNow, HOUR, \
+ PyBackgroundThread, touchFile, DISABLE_TORRENTDL, secondsToHumanTime, \
+ bytesToHumanSize, MAGIC_BYTES, deleteBitcoindDBs, TheTDM
+from jsonrpc import authproxy
+
+
+#############################################################################
+def satoshiIsAvailable(host='127.0.0.1', port=BITCOIN_PORT, timeout=0.01):
+
+ if not isinstance(port, (list,tuple)):
+ port = [port]
+
+ for p in port:
+ s = socket.socket()
+ s.settimeout(timeout) # Most of the time checking localhost -- FAST
+ try:
+ s.connect((host, p))
+ s.close()
+ return p
+ except:
+ pass
+
+ return 0
+
+
+################################################################################
+def extractSignedDataFromVersionsDotTxt(wholeFile, doVerify=True):
+ """
+ This method returns a pair: a dictionary to lookup link by OS, and
+ a formatted string that is sorted by OS, and re-formatted list that
+ will hash the same regardless of original format or ordering
+ """
+
+   msgBegin = wholeFile.find('# -----BEGIN-SIGNED-DATA-')
+   msgEnd = wholeFile.find('# -----SIGNATURE---------')
+   sigEnd = wholeFile.find('# -----END-SIGNED-DATA---')
+
+   # Test the raw find() results BEFORE the +1/+3 arithmetic masks a -1
+   if -1 in [msgBegin,msgEnd,sigEnd]:
+      LOGERROR('No signed data block found')
+      return ''
+
+   msgBegin = wholeFile.find('\n', msgBegin+1) + 1
+   sigBegin = wholeFile.find('\n', msgEnd+1) + 3
+   MSGRAW, SIGHEX = wholeFile[msgBegin:msgEnd], wholeFile[sigBegin:sigEnd].strip()
+
+
+ if doVerify:
+ Pub = SecureBinaryData(hex_to_binary(ARMORY_INFO_SIGN_PUBLICKEY))
+ Msg = SecureBinaryData(MSGRAW)
+ Sig = SecureBinaryData(hex_to_binary(SIGHEX))
+ isVerified = CryptoECDSA().VerifyData(Msg, Sig, Pub)
+
+ if not isVerified:
+ LOGERROR('Signed data block failed verification!')
+ return ''
+ else:
+ LOGINFO('Signature on signed data block is GOOD!')
+
+ return MSGRAW
+
+
+################################################################################
+def parseLinkList(theData):
+ """
+ Plug the verified data into here...
+ """
+ DLDICT,VERDICT = {},{}
+ sectStr = None
+ for line in theData.split('\n'):
+ pcs = line[1:].split()
+ if line.startswith('# SECTION-') and 'INSTALLERS' in line:
+ sectStr = pcs[0].split('-')[-1]
+ if not sectStr in DLDICT:
+ DLDICT[sectStr] = {}
+ VERDICT[sectStr] = ''
+ if len(pcs)>1:
+ VERDICT[sectStr] = pcs[-1]
+ continue
+
+ if len(pcs)==3 and pcs[1].startswith('http'):
+ DLDICT[sectStr][pcs[0]] = pcs[1:]
+
+ return DLDICT,VERDICT
+
+
+
+
+
+################################################################################
+# jgarzik's jsonrpc-bitcoin code -- stupid-easy to talk to bitcoind
+class SatoshiDaemonManager(object):
+ """
+ Use an existing implementation of bitcoind
+ """
+
+ class BitcoindError(Exception): pass
+ class BitcoindNotAvailableError(Exception): pass
+ class BitcoinDotConfError(Exception): pass
+ class SatoshiHomeDirDNE(Exception): pass
+ class ConfigFileUserDNE(Exception): pass
+ class ConfigFilePwdDNE(Exception): pass
+
+
+ #############################################################################
+ def __init__(self):
+ self.executable = None
+ self.satoshiHome = None
+ self.bitconf = {}
+ self.proxy = None
+ self.bitcoind = None
+ self.isMidQuery = False
+ self.last20queries = []
+ self.disabled = False
+ self.failedFindExe = False
+ self.failedFindHome = False
+ self.foundExe = []
+ self.circBufferState = []
+ self.circBufferTime = []
+ self.btcOut = None
+ self.btcErr = None
+ self.lastTopBlockInfo = { \
+ 'numblks': -1,
+ 'tophash': '',
+ 'toptime': -1,
+ 'error': 'Uninitialized',
+ 'blkspersec': -1 }
+
+ # Added torrent DL before we *actually* start SDM (if it makes sense)
+ self.useTorrentFinalAnswer = False
+ self.useTorrentFile = ''
+ self.torrentDisabled = False
+ self.tdm = None
+ self.satoshiHome = None
+
+
+ #############################################################################
+ def setSatoshiDir(self, newDir):
+ self.satoshiHome = newDir
+
+ #############################################################################
+ def setDisableTorrentDL(self, b):
+ self.torrentDisabled = b
+
+ #############################################################################
+ def tryToSetupTorrentDL(self, torrentPath):
+ if self.torrentDisabled:
+ LOGWARN('Tried to setup torrent download mgr but we are disabled')
+ return False
+
+ if not torrentPath or not os.path.exists(torrentPath):
+ self.useTorrentFinalAnswer = False
+ return False
+
+ bootfile = os.path.join(self.satoshiHome, 'bootstrap.dat')
+ TheTDM.setupTorrent(torrentPath, bootfile)
+ if not TheTDM.getTDMState()=='ReadyToStart':
+ LOGERROR('Unknown error trying to start torrent manager')
+ self.useTorrentFinalAnswer = False
+ return False
+
+
+ # We will tell the TDM to write status updates to the log file, and only
+ # every 90 seconds. After it finishes (or fails), simply launch bitcoind
+ # as we would've done without the torrent
+ #####
+ def torrentLogToFile(dpflag=Event(), fractionDone=None, timeEst=None,
+ downRate=None, upRate=None, activity=None,
+ statistics=None, **kws):
+ statStr = ''
+ if fractionDone:
+ statStr += ' Done: %0.1f%% ' % (fractionDone*100)
+ if downRate:
+ statStr += ' / DLRate: %0.1f/sec' % (downRate/1024.)
+ if timeEst:
+ statStr += ' / TLeft: %s' % secondsToHumanTime(timeEst)
+ if statistics:
+ statStr += ' / Seeds: %d' % (statistics.numSeeds)
+ statStr += ' / Peers: %d' % (statistics.numPeers)
+
+ if len(statStr)==0:
+ statStr = 'No torrent info available'
+
+ LOGINFO('Torrent: %s' % statStr)
+
+ #####
+ def torrentFinished():
+ bootsz = ''
+ if os.path.exists(bootfile):
+ bootsz = bytesToHumanSize(os.path.getsize(bootfile))
+
+ LOGINFO('Torrent finished; size of %s is %s', torrentPath, bootsz)
+ LOGINFO('Remove the core btc databases before doing bootstrap')
+ deleteBitcoindDBs()
+ self.launchBitcoindAndGuardian()
+
+ #####
+ def torrentFailed():
+ # Not sure there's actually anything we need to do here...
+ bootsz = ''
+ if os.path.exists(bootfile):
+ bootsz = bytesToHumanSize(os.path.getsize(bootfile))
+
+ LOGERROR('Torrent failed; size of %s is %s', torrentPath, bootsz)
+ self.launchBitcoindAndGuardian()
+
+ TheTDM.setSecondsBetweenUpdates(90)
+ TheTDM.setCallback('displayFunc', torrentLogToFile)
+ TheTDM.setCallback('finishedFunc', torrentFinished)
+ TheTDM.setCallback('failedFunc', torrentFailed)
+
+ LOGINFO('Bootstrap file is %s' % bytesToHumanSize(TheTDM.torrentSize))
+
+ self.useTorrentFinalAnswer = True
+ self.useTorrentFile = torrentPath
+ return True
+
+
+ #############################################################################
+ def shouldTryBootstrapTorrent(self):
+ if DISABLE_TORRENTDL or TheTDM.getTDMState()=='Disabled':
+ return False
+
+ # The only torrent we have is for the primary Bitcoin network
+ if not MAGIC_BYTES=='\xf9\xbe\xb4\xd9':
+ return False
+
+
+
+ if TheTDM.torrentSize:
+ bootfile = os.path.join(self.satoshiHome, 'bootstrap.dat')
+ if os.path.exists(bootfile):
+ if os.path.getsize(bootfile) >= TheTDM.torrentSize/2:
+ LOGWARN('Looks like a full bootstrap is already here')
+ LOGWARN('Skipping torrent download')
+ return False
+
+
+ # If they don't even have a BTC_HOME_DIR, corebtc never been installed
+ blockDir = os.path.join(self.satoshiHome, 'blocks')
+ if not os.path.exists(self.satoshiHome) or not os.path.exists(blockDir):
+ return True
+
+ # Get the cumulative size of the blk*.dat files
+ blockDirSize = sum([os.path.getsize(os.path.join(blockDir, a)) \
+ for a in os.listdir(blockDir) if a.startswith('blk')])
+ sizeStr = bytesToHumanSize(blockDirSize)
+ LOGINFO('Total size of files in %s is %s' % (blockDir, sizeStr))
+
+ # If they have only a small portion of the blockchain, do it
+ szThresh = 100*MEGABYTE if USE_TESTNET else 6*GIGABYTE
+ if blockDirSize < szThresh:
+ return True
+
+ # So far we know they have a BTC_HOME_DIR, with more than 6GB in blocks/
+ # The only thing that can induce torrent now is if we have a partially-
+ # finished bootstrap file bigger than the blocks dir.
+ bootFiles = ['','']
+ bootFiles[0] = os.path.join(self.satoshiHome, 'bootstrap.dat')
+ bootFiles[1] = os.path.join(self.satoshiHome, 'bootstrap.dat.partial')
+ for fn in bootFiles:
+ if os.path.exists(fn):
+ if os.path.getsize(fn) > blockDirSize:
+ return True
+
+ # Okay, we give up -- just download [the rest] via P2P
+ return False
+
+
+ #############################################################################
+ #def setSatoshiDir(self, newDir):
+ #self.satoshiHome = newDir
+
+ #############################################################################
+ def setupSDM(self, pathToBitcoindExe=None, satoshiHome=None, \
+ extraExeSearch=[], createHomeIfDNE=True):
+ LOGDEBUG('Exec setupSDM')
+ self.failedFindExe = False
+ self.failedFindHome = False
+ # If we are supplied a path, then ignore the extra exe search paths
+ if pathToBitcoindExe==None:
+ pathToBitcoindExe = self.findBitcoind(extraExeSearch)
+ if len(pathToBitcoindExe)==0:
+ LOGDEBUG('Failed to find bitcoind')
+ self.failedFindExe = True
+ else:
+ LOGINFO('Found bitcoind in the following places:')
+ for p in pathToBitcoindExe:
+ LOGINFO(' %s', p)
+ pathToBitcoindExe = pathToBitcoindExe[0]
+ LOGINFO('Using: %s', pathToBitcoindExe)
+
+ if not os.path.exists(pathToBitcoindExe):
+ LOGINFO('Somehow failed to find exe even after finding it...?')
+ self.failedFindExe = True
+
+ self.executable = pathToBitcoindExe
+
+ # Four possible conditions for already-set satoshi home dir, and input arg
+ if satoshiHome is not None:
+ self.satoshiHome = satoshiHome
+ else:
+ if self.satoshiHome is None:
+ self.satoshiHome = BTC_HOME_DIR
+
+ # If no new dir is specified, leave satoshi home if it's already set
+ # Give it a default BTC_HOME_DIR if not.
+ if not os.path.exists(self.satoshiHome):
+ if createHomeIfDNE:
+ LOGINFO('Making satoshi home dir')
+ os.makedirs(self.satoshiHome)
+ else:
+ LOGINFO('No home dir, makedir not requested')
+ self.failedFindHome = True
+
+ if self.failedFindExe: raise self.BitcoindError, 'bitcoind not found'
+ if self.failedFindHome: raise self.BitcoindError, 'homedir not found'
+
+ self.disabled = False
+ self.proxy = None
+ self.bitcoind = None # this will be a Popen object
+ self.isMidQuery = False
+ self.last20queries = []
+
+ self.readBitcoinConf(makeIfDNE=True)
+
+
+
+
+
+ #############################################################################
+ def setDisabled(self, newBool=True):
+ s = self.getSDMState()
+
+ if newBool==True:
+ if s in ('BitcoindInitializing', 'BitcoindSynchronizing', 'BitcoindReady'):
+ self.stopBitcoind()
+
+ self.disabled = newBool
+
+
+ #############################################################################
+ def getAllFoundExe(self):
+ return list(self.foundExe)
+
+
+ #############################################################################
+ def findBitcoind(self, extraSearchPaths=[]):
+ self.foundExe = []
+
+ searchPaths = list(extraSearchPaths) # create a copy
+
+ if OS_WINDOWS:
+ # Making sure the search path argument comes with /daemon and /Bitcoin on Windows
+
+ searchPaths.extend([os.path.join(sp, 'Bitcoin') for sp in searchPaths])
+ searchPaths.extend([os.path.join(sp, 'daemon') for sp in searchPaths])
+
+ possBaseDir = []
+
+ from platform import machine
+ if '64' in machine():
+ possBaseDir.append(os.getenv("ProgramW6432"))
+ possBaseDir.append(os.getenv('PROGRAMFILES(X86)'))
+ else:
+ possBaseDir.append(os.getenv('PROGRAMFILES'))
+
+ # check desktop for links
+
+ home = os.path.expanduser('~')
+ desktop = os.path.join(home, 'Desktop')
+
+ if os.path.exists(desktop):
+ dtopfiles = os.listdir(desktop)
+ for path in [os.path.join(desktop, fn) for fn in dtopfiles]:
+ if 'bitcoin' in path.lower() and path.lower().endswith('.lnk'):
+ import win32com.client
+ shell = win32com.client.Dispatch('WScript.Shell')
+ targ = shell.CreateShortCut(path).Targetpath
+ targDir = os.path.dirname(targ)
+ LOGINFO('Found Bitcoin-Qt link on desktop: %s', targDir)
+ possBaseDir.append( targDir )
+
+ # Also look in default place in ProgramFiles dirs
+
+
+
+
+ # Now look at a few subdirs of the
+ searchPaths.extend(possBaseDir)
+ searchPaths.extend([os.path.join(p, 'Bitcoin', 'daemon') for p in possBaseDir])
+ searchPaths.extend([os.path.join(p, 'daemon') for p in possBaseDir])
+ searchPaths.extend([os.path.join(p, 'Bitcoin') for p in possBaseDir])
+
+ for p in searchPaths:
+ testPath = os.path.join(p, 'bitcoind.exe')
+ if os.path.exists(testPath):
+ self.foundExe.append(testPath)
+
+ else:
+ # In case this was a downloaded copy, make sure we traverse to bin/64 dir
+ if SystemSpecs.IsX64:
+ searchPaths.extend([os.path.join(p, 'bin/64') for p in extraSearchPaths])
+ else:
+ searchPaths.extend([os.path.join(p, 'bin/32') for p in extraSearchPaths])
+
+ searchPaths.extend(['/usr/bin/', '/usr/lib/bitcoin/'])
+
+ for p in searchPaths:
+ testPath = os.path.join(p, 'bitcoind')
+ if os.path.exists(testPath):
+ self.foundExe.append(testPath)
+
+ try:
+ locs = subprocess_check_output(['whereis','bitcoind']).split()
+ if len(locs)>1:
+ locs = filter(lambda x: os.path.basename(x)=='bitcoind', locs)
+ LOGINFO('"whereis" returned: %s', str(locs))
+ self.foundExe.extend(locs)
+ except:
+ LOGEXCEPT('Error executing "whereis" command')
+
+
+ # For logging purposes, check that the first answer matches one of the
+ # extra search paths. There should be some kind of notification that
+ # their supplied search path was invalid and we are using something else.
+ if len(self.foundExe)>0 and len(extraSearchPaths)>0:
+ foundIt = False
+ for p in extraSearchPaths:
+ if self.foundExe[0].startswith(p):
+ foundIt=True
+
+ if not foundIt:
+ LOGERROR('Bitcoind could not be found in the specified installation:')
+ for p in extraSearchPaths:
+ LOGERROR(' %s', p)
+ LOGERROR('Bitcoind is being started from:')
+ LOGERROR(' %s', self.foundExe[0])
+
+ return self.foundExe
+
+ #############################################################################
+ def getGuardianPath(self):
+ if OS_WINDOWS:
+ armoryInstall = os.path.dirname(inspect.getsourcefile(SatoshiDaemonManager))
+ # This should return a zip file because of py2exe
+ if armoryInstall.endswith('.zip'):
+ armoryInstall = os.path.dirname(armoryInstall)
+ gpath = os.path.join(armoryInstall, 'guardian.exe')
+ else:
+ theDir = os.path.dirname(inspect.getsourcefile(SatoshiDaemonManager))
+ gpath = os.path.join(theDir, 'guardian.py')
+
+ if not os.path.exists(gpath):
+ LOGERROR('Could not find guardian script: %s', gpath)
+ raise FileExistsError
+ return gpath
+
+ #############################################################################
+ def readBitcoinConf(self, makeIfDNE=False):
+ LOGINFO('Reading bitcoin.conf file')
+ bitconf = os.path.join( self.satoshiHome, 'bitcoin.conf' )
+ if not os.path.exists(bitconf):
+ if not makeIfDNE:
+ raise self.BitcoinDotConfError, 'Could not find bitcoin.conf'
+ else:
+ LOGINFO('No bitcoin.conf available. Creating it...')
+ touchFile(bitconf)
+
+ # Guarantee that bitcoin.conf file has very strict permissions
+ if OS_WINDOWS:
+ if OS_VARIANT[0].lower()=='xp':
+ LOGERROR('Cannot set permissions correctly in XP!')
+ LOGERROR('Please confirm permissions on the following file ')
+ LOGERROR('are set to exclusive access only for your user ')
+ LOGERROR('(it usually is, but Armory cannot guarantee it ')
+ LOGERROR('on XP systems):')
+ LOGERROR(' %s', bitconf)
+ else:
+ LOGINFO('Setting permissions on bitcoin.conf')
+ import win32api
+ username = win32api.GetUserName()
+ LOGINFO('Setting permissions on bitcoin.conf')
+ cmd_icacls = ['icacls',bitconf,'/inheritance:r','/grant:r', '%s:F' % username]
+ icacls_out = subprocess_check_output(cmd_icacls, shell=True)
+ LOGINFO('icacls returned: %s', icacls_out)
+ else:
+ LOGINFO('Setting permissions on bitcoin.conf')
+ os.chmod(bitconf, stat.S_IRUSR | stat.S_IWUSR)
+
+
+ with open(bitconf,'r') as f:
+ # Find the last character of the each line: either a newline or '#'
+ endchr = lambda line: line.find('#') if line.find('#')>1 else len(line)
+
+ # Reduce each line to a list of key,value pairs separated with '='
+ allconf = [l[:endchr(l)].strip().split('=') for l in f.readlines()]
+
+ # Need to convert to (x[0],x[1:]) in case the password has '=' in it
+ allconfPairs = [[x[0], '='.join(x[1:])] for x in allconf if len(x)>1]
+
+ # Convert the list of pairs to a dictionary
+ self.bitconf = dict(allconfPairs)
+
+
+ # Look for rpcport, use default if not there
+ self.bitconf['rpcport'] = int(self.bitconf.get('rpcport', BITCOIN_RPC_PORT))
+
+ # We must have a username and password. If not, append to file
+ if not self.bitconf.has_key('rpcuser'):
+ LOGDEBUG('No rpcuser: creating one')
+ with open(bitconf,'a') as f:
+ f.write('\n')
+ f.write('rpcuser=generated_by_armory\n')
+ self.bitconf['rpcuser'] = 'generated_by_armory'
+
+ if not self.bitconf.has_key('rpcpassword'):
+ LOGDEBUG('No rpcpassword: creating one')
+ with open(bitconf,'a') as f:
+ randBase58 = SecureBinaryData().GenerateRandom(32).toBinStr()
+ randBase58 = binary_to_base58(randBase58)
+ f.write('\n')
+ f.write('rpcpassword=%s' % randBase58)
+ self.bitconf['rpcpassword'] = randBase58
+
+
+ if not isASCII(self.bitconf['rpcuser']):
+ LOGERROR('Non-ASCII character in bitcoin.conf (rpcuser)!')
+ if not isASCII(self.bitconf['rpcpassword']):
+ LOGERROR('Non-ASCII character in bitcoin.conf (rpcpassword)!')
+
+ self.bitconf['host'] = '127.0.0.1'
+
+
+ #############################################################################
+ def cleanupFailedTorrent(self):
+ # Right now I think don't do anything
+ pass
+
+ #############################################################################
+ def startBitcoind(self):
+ self.btcOut, self.btcErr = None,None
+ if self.disabled:
+ LOGERROR('SDM was disabled, must be re-enabled before starting')
+ return
+
+ LOGINFO('Called startBitcoind')
+
+ if self.isRunningBitcoind() or TheTDM.getTDMState()=='Downloading':
+ raise self.BitcoindError, 'Looks like we have already started theSDM'
+
+ if not os.path.exists(self.executable):
+ raise self.BitcoindError, 'Could not find bitcoind'
+
+
+ chk1 = os.path.exists(self.useTorrentFile)
+ chk2 = self.shouldTryBootstrapTorrent()
+ chk3 = TheTDM.getTDMState()=='ReadyToStart'
+
+ if chk1 and chk2 and chk3:
+ TheTDM.startDownload()
+ else:
+ self.launchBitcoindAndGuardian()
+
+
+
+ #############################################################################
+ def launchBitcoindAndGuardian(self):
+
+ pargs = [self.executable]
+
+ if USE_TESTNET:
+ testhome = self.satoshiHome[:]
+ if self.satoshiHome.endswith('/testnet3/'):
+ pargs.append('-datadir=%s' % self.satoshiHome[:-10])
+ elif self.satoshiHome.endswith('/testnet3'):
+ pargs.append('-datadir=%s' % self.satoshiHome[:-9])
+ pargs.append('-testnet')
+ else:
+ pargs.append('-datadir=%s' % self.satoshiHome)
+ try:
+ # Don't want some strange error in this size-check to abort loading
+ blocksdir = os.path.join(self.satoshiHome, 'blocks')
+ sz = long(0)
+ if os.path.exists(blocksdir):
+ for fn in os.listdir(blocksdir):
+ fnpath = os.path.join(blocksdir, fn)
+ sz += long(os.path.getsize(fnpath))
+
+ if sz < 5*GIGABYTE:
+ if SystemSpecs.Memory>9.0:
+ pargs.append('-dbcache=2000')
+ elif SystemSpecs.Memory>5.0:
+ pargs.append('-dbcache=1000')
+ elif SystemSpecs.Memory>3.0:
+ pargs.append('-dbcache=500')
+ except:
+ LOGEXCEPT('Failed size check of blocks directory')
+
+
+ # Startup bitcoind and get its process ID (along with our own)
+ self.bitcoind = launchProcess(pargs)
+
+ self.btcdpid = self.bitcoind.pid
+ self.selfpid = os.getpid()
+
+ LOGINFO('PID of bitcoind: %d', self.btcdpid)
+ LOGINFO('PID of armory: %d', self.selfpid)
+
+ # Startup guardian process -- it will watch Armory's PID
+ gpath = self.getGuardianPath()
+ pargs = [gpath, str(self.selfpid), str(self.btcdpid)]
+ if not OS_WINDOWS:
+ pargs.insert(0, 'python')
+ launchProcess(pargs)
+
+
+
+ #############################################################################
+ def stopBitcoind(self):
+ LOGINFO('Called stopBitcoind')
+ if not self.isRunningBitcoind():
+ LOGINFO('...but bitcoind is not running, to be able to stop')
+ return
+
+ killProcessTree(self.bitcoind.pid)
+ killProcess(self.bitcoind.pid)
+
+ time.sleep(1)
+ self.bitcoind = None
+
+
+ #############################################################################
+ def isRunningBitcoind(self):
+ """
+ armoryengine satoshiIsAvailable() only tells us whether there's a
+ running bitcoind that is actively responding on its port. But it
+ won't be responding immediately after we've started it (still doing
+ startup operations). If bitcoind was started and still running,
+      then poll() will return None.  Any other poll() return value means
+ that the process terminated
+ """
+ if self.bitcoind==None:
+ return False
+ else:
+ if not self.bitcoind.poll()==None:
+ LOGDEBUG('Bitcoind is no more')
+ if self.btcOut==None:
+ self.btcOut, self.btcErr = self.bitcoind.communicate()
+ LOGWARN('bitcoind exited, bitcoind STDOUT:')
+ for line in self.btcOut.split('\n'):
+ LOGWARN(line)
+ LOGWARN('bitcoind exited, bitcoind STDERR:')
+ for line in self.btcErr.split('\n'):
+ LOGWARN(line)
+ return self.bitcoind.poll()==None
+
+ #############################################################################
+ def wasRunningBitcoind(self):
+ return (not self.bitcoind==None)
+
+ #############################################################################
+ def bitcoindIsResponsive(self):
+ return satoshiIsAvailable(self.bitconf['host'], self.bitconf['rpcport'])
+
+ #############################################################################
+ def getSDMState(self):
+ """
+ As for why I'm doing this: it turns out that between "initializing"
+ and "synchronizing", bitcoind temporarily stops responding entirely,
+ which causes "not-available" to be the state. I need to smooth that
+ out because it wreaks havoc on the GUI which will switch to showing
+ a nasty error.
+ """
+
+ state = self.getSDMStateLogic()
+ self.circBufferState.append(state)
+ self.circBufferTime.append(RightNow())
+ if len(self.circBufferTime)>2 and \
+ (self.circBufferTime[-1] - self.circBufferTime[1]) > 5:
+ # Only remove the first element if we have at least 5s history
+ self.circBufferState = self.circBufferState[1:]
+ self.circBufferTime = self.circBufferTime[1:]
+
+ # Here's where we modify the output to smooth out the gap between
+ # "initializing" and "synchronizing" (which is a couple seconds
+ # of "not available"). "NotAvail" keeps getting added to the
+ # buffer, but if it was "initializing" in the last 5 seconds,
+ # we will keep "initializing"
+ if state=='BitcoindNotAvailable':
+ if 'BitcoindInitializing' in self.circBufferState:
+ LOGWARN('Overriding not-available state. This should happen 0-5 times')
+ return 'BitcoindInitializing'
+
+ return state
+
+ #############################################################################
+ def getSDMStateLogic(self):
+
+ if self.disabled:
+ return 'BitcoindMgmtDisabled'
+
+ if self.failedFindExe:
+ return 'BitcoindExeMissing'
+
+ if self.failedFindHome:
+ return 'BitcoindHomeMissing'
+
+ if TheTDM.isRunning():
+ return 'TorrentSynchronizing'
+
+ latestInfo = self.getTopBlockInfo()
+
+ if self.bitcoind==None and latestInfo['error']=='Uninitialized':
+ return 'BitcoindNeverStarted'
+
+ if not self.isRunningBitcoind():
+ # Not running at all: either never started, or process terminated
+ if not self.btcErr==None and len(self.btcErr)>0:
+ errstr = self.btcErr.replace(',',' ').replace('.',' ').replace('!',' ')
+ errPcs = set([a.lower() for a in errstr.split()])
+ runPcs = set(['cannot','obtain','lock','already','running'])
+ dbePcs = set(['database', 'recover','backup','except','wallet','dat'])
+ if len(errPcs.intersection(runPcs))>=(len(runPcs)-1):
+ return 'BitcoindAlreadyRunning'
+ elif len(errPcs.intersection(dbePcs))>=(len(dbePcs)-1):
+ return 'BitcoindDatabaseEnvError'
+ else:
+ return 'BitcoindUnknownCrash'
+ else:
+ return 'BitcoindNotAvailable'
+ elif not self.bitcoindIsResponsive():
+ # Running but not responsive... must still be initializing
+ return 'BitcoindInitializing'
+ else:
+ # If it's responsive, get the top block and check
+ # TODO: These conditionals are based on experimental results. May
+ # not be accurate what the specific errors mean...
+ if latestInfo['error']=='ValueError':
+ return 'BitcoindWrongPassword'
+ elif latestInfo['error']=='JsonRpcException':
+ return 'BitcoindInitializing'
+ elif latestInfo['error']=='SocketError':
+ return 'BitcoindNotAvailable'
+
+ if 'BitcoindReady' in self.circBufferState:
+ # If ready, always ready
+ return 'BitcoindReady'
+
+         # If we get here, bitcoind gave us a response.
+ secSinceLastBlk = RightNow() - latestInfo['toptime']
+ blkspersec = latestInfo['blkspersec']
+ #print 'Blocks per 10 sec:', ('UNKNOWN' if blkspersec==-1 else blkspersec*10)
+ if secSinceLastBlk > 4*HOUR or blkspersec==-1:
+ return 'BitcoindSynchronizing'
+ else:
+ if blkspersec*20 > 2 and not 'BitcoindReady' in self.circBufferState:
+ return 'BitcoindSynchronizing'
+ else:
+ return 'BitcoindReady'
+
+
+
+
+ #############################################################################
+ def createProxy(self, forceNew=False):
+ if self.proxy==None or forceNew:
+ LOGDEBUG('Creating proxy')
+ usr,pas,hst,prt = [self.bitconf[k] for k in ['rpcuser','rpcpassword',\
+ 'host', 'rpcport']]
+ pstr = 'http://%s:%s@%s:%d' % (usr,pas,hst,prt)
+ LOGINFO('Creating proxy in SDM: host=%s, port=%s', hst,prt)
+ self.proxy = ServiceProxy(pstr)
+
+
+ #############################################################################
+ def __backgroundRequestTopBlock(self):
+ self.createProxy()
+ self.isMidQuery = True
+ try:
+ numblks = self.proxy.getinfo()['blocks']
+ blkhash = self.proxy.getblockhash(numblks)
+ toptime = self.proxy.getblock(blkhash)['time']
+ #LOGDEBUG('RPC Call: numBlks=%d, toptime=%d', numblks, toptime)
+ # Only overwrite once all outputs are retrieved
+ self.lastTopBlockInfo['numblks'] = numblks
+ self.lastTopBlockInfo['tophash'] = blkhash
+ self.lastTopBlockInfo['toptime'] = toptime
+ self.lastTopBlockInfo['error'] = None # Holds error info
+
+ if len(self.last20queries)==0 or \
+ (RightNow()-self.last20queries[-1][0]) > 0.99:
+ # This conditional guarantees last 20 queries spans at least 20s
+ self.last20queries.append([RightNow(), numblks])
+ self.last20queries = self.last20queries[-20:]
+ t0,b0 = self.last20queries[0]
+ t1,b1 = self.last20queries[-1]
+
+            # Need at least 10s of data to give a meaningful answer
+ if (t1-t0)<10:
+ self.lastTopBlockInfo['blkspersec'] = -1
+ else:
+ self.lastTopBlockInfo['blkspersec'] = float(b1-b0)/float(t1-t0)
+
+ except ValueError:
+ # I believe this happens when you used the wrong password
+ LOGEXCEPT('ValueError in bkgd req top blk')
+ self.lastTopBlockInfo['error'] = 'ValueError'
+ except authproxy.JSONRPCException:
+ # This seems to happen when bitcoind is overwhelmed... not quite ready
+ LOGDEBUG('generic jsonrpc exception')
+ self.lastTopBlockInfo['error'] = 'JsonRpcException'
+ except socket.error:
+ # Connection isn't available... is bitcoind not running anymore?
+ LOGDEBUG('generic socket error')
+ self.lastTopBlockInfo['error'] = 'SocketError'
+ except:
+ LOGEXCEPT('generic error')
+ self.lastTopBlockInfo['error'] = 'UnknownError'
+ raise
+ finally:
+ self.isMidQuery = False
+
+
+ #############################################################################
+ def updateTopBlockInfo(self):
+ """
+ We want to get the top block information, but if bitcoind is rigorously
+ downloading and verifying the blockchain, it can sometimes take 10s to
+      respond to JSON-RPC calls!  We must do it in the background...
+
+ If it's already querying, no need to kick off another background request,
+ just return the last value, which may be "stale" but we don't really
+ care for this particular use-case
+ """
+ if not self.isRunningBitcoind():
+ return
+
+ if self.isMidQuery:
+ return
+
+ self.createProxy()
+ self.queryThread = PyBackgroundThread(self.__backgroundRequestTopBlock)
+ self.queryThread.start()
+
+
+ #############################################################################
+ def getTopBlockInfo(self):
+ if self.isRunningBitcoind():
+ self.updateTopBlockInfo()
+ self.queryThread.join(0.001) # In most cases, result should come in 1 ms
+ # We return a copy so that the data is not changing as we use it
+
+ return self.lastTopBlockInfo.copy()
+
+
+ #############################################################################
+ def callJSON(self, func, *args):
+ state = self.getSDMState()
+ if not state in ('BitcoindReady', 'BitcoindSynchronizing'):
+ LOGERROR('Called callJSON(%s, %s)', func, str(args))
+ LOGERROR('Current SDM state: %s', state)
+ raise self.BitcoindError, 'callJSON while %s'%state
+
+ return self.proxy.__getattr__(func)(*args)
+
+
+ #############################################################################
+ def returnSDMInfo(self):
+ sdminfo = {}
+ for key,val in self.bitconf.iteritems():
+ sdminfo['bitconf_%s'%key] = val
+
+ for key,val in self.lastTopBlockInfo.iteritems():
+ sdminfo['topblk_%s'%key] = val
+
+ sdminfo['executable'] = self.executable
+ sdminfo['isrunning'] = self.isRunningBitcoind()
+ sdminfo['homedir'] = self.satoshiHome
+ sdminfo['proxyinit'] = (not self.proxy==None)
+ sdminfo['ismidquery'] = self.isMidQuery
+ sdminfo['querycount'] = len(self.last20queries)
+
+ return sdminfo
+
+ #############################################################################
+ def printSDMInfo(self):
+ print '\nCurrent SDM State:'
+ print '\t', 'SDM State Str'.ljust(20), ':', self.getSDMState()
+ for key,value in self.returnSDMInfo().iteritems():
+ print '\t', str(key).ljust(20), ':', str(value)
+
+
diff --git a/announcefetch.py b/announcefetch.py
new file mode 100644
index 000000000..9ffbfcc91
--- /dev/null
+++ b/announcefetch.py
@@ -0,0 +1,386 @@
+from armoryengine.ALL import *
+from threading import Event
+from jasvet import verifySignature, readSigBlock
+import os
+import sys
+import time
+import urllib
+
+
+DEFAULT_FETCH_INTERVAL = 30*MINUTE
+DEFAULT_MIN_PRIORITY = 2048
+
+if not CLI_OPTIONS.testAnnounceCode:
+ # Signed with the Bitcoin offline announce key (see top of ArmoryUtils.py)
+ ANNOUNCE_SIGN_PUBKEY = ARMORY_INFO_SIGN_PUBLICKEY
+ ANNOUNCE_URL = 'https://bitcoinarmory.com/announce.txt'
+ ANNOUNCE_URL_BACKUP = 'https://s3.amazonaws.com/bitcoinarmory-media/announce.txt'
+else:
+ # This is a lower-security announce file, fake data, just for testing
+ ANNOUNCE_SIGN_PUBKEY = ('04'
+ '601c891a2cbc14a7b2bb1ecc9b6e42e166639ea4c2790703f8e2ed126fce432c'
+ '62fe30376497ad3efcd2964aa0be366010c11b8d7fc8209f586eac00bb763015')
+ ANNOUNCE_URL = 'https://s3.amazonaws.com/bitcoinarmory-testing/testannounce.txt'
+ ANNOUNCE_URL_BACKUP = ANNOUNCE_URL
+
+
+class AnnounceDataFetcher(object):
+ """
+ Armory Technologies, Inc, will post occasional SIGNED updates to be
+ processed by running instances of Armory that haven't disabled it.
+
+ The files in the fetchDir will be small. At the time of this writing,
+ the only files we will fetch and store:
+
+ announce.txt : announcements & alerts to be displayed to the user
+      changelog.txt  : changelog of versions, triggers update notifications
+ dllinks.txt : URLs and hashes of installers for all OS and versions
+ notify.txt : Notifications & alerts
+ bootstrap.dat.torrent : torrent file for quick blockchain download
+ """
+
+ #############################################################################
+ def __init__(self, announceURL=ANNOUNCE_URL, \
+ backupURL=ANNOUNCE_URL_BACKUP, \
+ fetchDir=None):
+
+ self.loopIsIdle = Event()
+ self.forceCheckFlag = Event()
+ self.forceIsFinished = Event()
+ self.firstSuccess = Event()
+ self.shutdownFlag = Event()
+ self.lastFetch = 0
+ self.lastChange = 0
+ self.loopThread = None
+ self.disabled = False
+ self.setFetchInterval(DEFAULT_FETCH_INTERVAL)
+ self.loopIsIdle.set()
+ self.lastAnnounceChange = 0
+
+ # Where to fetch the data from
+ self.announceURL = announceURL
+ self.announceURL_backup = backupURL
+
+ # Just disable ourselves if we have continuous exceptions
+ self.numConsecutiveExceptions = 0
+
+      # If we are on testnet, we may require matching a mainnet addr
+ a160 = hash160(hex_to_binary(ANNOUNCE_SIGN_PUBKEY))
+ self.validAddrStr = hash160_to_addrStr(a160)
+
+
+
+ # Make sure the fetch directory exists (where we put downloaded files)
+ self.fetchDir = fetchDir
+ if fetchDir is None:
+ self.fetchDir = os.path.join(ARMORY_HOME_DIR, 'announcefiles')
+ if not os.path.exists(self.fetchDir):
+ os.mkdir(self.fetchDir)
+
+
+ # Read and hash existing files in that directory
+ self.fileHashMap = {}
+ LOGINFO('Reading files in fetcher directory:')
+ for fname in os.listdir(self.fetchDir):
+ fpath = os.path.join(self.fetchDir, fname)
+ if not fname.endswith('.file') or os.path.getsize(fpath) > 16*MEGABYTE:
+ continue
+
+ fid = fname[:-5]
+ with open(fpath, 'rb') as f:
+ self.fileHashMap[fid] = binary_to_hex(sha256(f.read()))
+ LOGINFO(' %s : %s', fid.ljust(16), self.fileHashMap[fid])
+
+
+ #############################################################################
+ def start(self):
+ if not self.disabled:
+ self.loopThread = self.__runFetchLoop(async=True)
+
+ #############################################################################
+ def isDisabled(self):
+ return self.disabled
+
+ #############################################################################
+ def shutdown(self):
+ LOGINFO('Called AnnounceDataFetcher.shutdown()')
+ self.shutdownFlag.set()
+
+ #############################################################################
+ def setFetchInterval(self, newInterval):
+ self.fetchInterval = max(newInterval,10)
+
+ #############################################################################
+ def setDisabled(self, b=True):
+ self.disabled = b
+
+ #############################################################################
+ def isRunning(self):
+ return self.loopThread.isRunning() if self.loopThread else False
+
+ #############################################################################
+ def atLeastOneSuccess(self):
+ return self.firstSuccess.isSet()
+
+ #############################################################################
+ def numFiles(self):
+ return len(self.fileHashMap)
+
+ #############################################################################
+ def fetchRightNow(self, doWait=0):
+ self.forceIsFinished.clear()
+ self.forceCheckFlag.set()
+
+ if doWait > 0:
+ self.forceIsFinished.wait(doWait)
+ self.forceCheckFlag.clear()
+
+ #############################################################################
+ def getAnnounceFilePath(self, fileID):
+ fpath = os.path.join(self.fetchDir, fileID+'.file')
+ return fpath if os.path.exists(fpath) else None
+
+ #############################################################################
+ def getAnnounceFile(self, fileID, forceCheck=False, forceWait=10):
+ if forceCheck:
+ LOGINFO('Forcing fetch before returning file')
+ if not self.isRunning():
+ # This is safe because there's no one to collide with
+ self.__runFetchSequence()
+ else:
+ self.forceIsFinished.clear()
+ self.forceCheckFlag.set()
+
+ if not self.forceIsFinished.wait(forceWait):
+ self.forceCheckFlag.clear()
+ return None
+
+ self.forceCheckFlag.clear()
+ else:
+ # Wait up to one second for any current ops to finish
+ if not self.loopIsIdle.wait(1):
+ LOGERROR('Loop was busy for more than one second')
+ return None
+
+ # If the above succeeded, it will be in the fetchedFiles dir
+ # We may have
+ fpath = self.getAnnounceFilePath(fileID)
+
+ if not (fpath and os.path.exists(fpath)):
+ LOGERROR('No file with ID=%s was fetched', fileID)
+ return None
+
+ with open(fpath, 'rb') as f:
+ returnData = f.read()
+
+ return returnData
+
+
+ #############################################################################
+ def getFileModTime(self, fileID):
+ fpath = self.getAnnounceFilePath(fileID)
+ if not fpath or not os.path.exists(fpath):
+ #LOGERROR('No file with ID=%s was fetched', fileID)
+ return 0
+
+ return os.path.getmtime(fpath)
+
+
+
+ #############################################################################
+ def getLastSuccessfulFetchTime(self):
+ announcePath = os.path.join(self.fetchDir, 'announce.file')
+ if os.path.exists(announcePath):
+ return os.path.getmtime(announcePath)
+ else:
+ return 0
+
+
+
+
+ #############################################################################
+ def getDecoratedURL(self, url, verbose=False):
+ """
+ This always decorates the URL with at least Armory version. Use the
+ verbose=True option to add OS, subOS, and a few "random" bytes that help
+ reject duplicate queries.
+ """
+ argsMap = {}
+ argsMap['ver'] = getVersionString(BTCARMORY_VERSION)
+
+ if verbose:
+ if OS_WINDOWS:
+ argsMap['os'] = 'win'
+ elif OS_LINUX:
+ argsMap['os'] = 'lin'
+ elif OS_MACOSX:
+ argsMap['os'] = 'mac'
+ else:
+ argsMap['os'] = 'unk'
+
+ try:
+ if OS_MACOSX:
+ argsMap['osvar'] = OS_VARIANT
+ else:
+ argsMap['osvar'] = OS_VARIANT[0].lower()
+ except:
+ LOGERR('Unrecognized OS while constructing version URL')
+ argsMap['osvar'] = 'unk'
+
+ argsMap['id'] = binary_to_hex(hash256(USER_HOME_DIR)[:4])
+
+ return url + '?' + urllib.urlencode(argsMap)
+
+
+
+ #############################################################################
+ def __fetchAnnounceDigests(self, doDecorate=False):
+ self.lastFetch = RightNow()
+ digestURL = self.getDecoratedURL(self.announceURL, verbose=doDecorate)
+ backupURL = None
+ if self.announceURL_backup:
+ backupURL = self.getDecoratedURL(self.announceURL_backup)
+ return self.__fetchFile(digestURL, backupURL)
+
+
+
+ #############################################################################
+ def __fetchFile(self, url, backupURL=None):
+ LOGINFO('Fetching: %s', url)
+ try:
+ import urllib2
+ import socket
+ LOGDEBUG('Downloading URL: %s' % url)
+ socket.setdefaulttimeout(CLI_OPTIONS.nettimeout)
+ urlobj = urllib2.urlopen(url, timeout=CLI_OPTIONS.nettimeout)
+ return urlobj.read()
+ except ImportError:
+ LOGERROR('No module urllib2 -- cannot download anything')
+ return ''
+ except (urllib2.URLError, urllib2.HTTPError):
+ LOGERROR('Specified URL was inaccessible')
+ LOGERROR('Tried: %s', url)
+ return self.__fetchFile(backupURL) if backupURL else ''
+ except:
+ LOGEXCEPT('Unspecified error downloading URL')
+ return self.__fetchFile(backupURL) if backupURL else ''
+
+
+ #############################################################################
+ def __runFetchSequence(self):
+ ##### Always decorate the URL with OS, Armory version on the first run
+ digestData = self.__fetchAnnounceDigests(not self.firstSuccess.isSet())
+
+ if len(digestData)==0:
+ LOGWARN('Error fetching announce digest')
+ return
+
+ self.firstSuccess.set()
+
+ ##### Digests come in signature blocks. Verify sig using jasvet.
+ try:
+ sig, msg = readSigBlock(digestData)
+ signAddress = verifySignature(sig, msg, 'v1', ord(ADDRBYTE))
+ if not signAddress == self.validAddrStr:
+ LOGERROR('Announce info carried invalid signature!')
+ LOGERROR('Signature addr: %s' % signAddress)
+ LOGERROR('Expected address: %s', self.validAddrStr)
+ return
+ except:
+ LOGEXCEPT('Could not verify data in signed message block')
+ return
+
+ # Always rewrite file; it's small and will use mtime for info
+ with open(os.path.join(self.fetchDir, 'announce.file'), 'w') as f:
+ f.write(digestData)
+
+
+ ##### We have a valid digest, now parse it
+ justDownloadedMap = {}
+ for row in [line.split() for line in msg.strip().split('\n')]:
+ if len(row)==3:
+ justDownloadedMap[row[0]] = [row[1], row[2]]
+ else:
+ LOGERROR('Malformed announce matrix: %s' % str(row))
+ return
+
+ ##### Check whether any of the hashes have changed
+ for key,val in justDownloadedMap.iteritems():
+ jdURL,jdHash = val[0],val[1]
+
+ if not (key in self.fileHashMap and self.fileHashMap[key]==jdHash):
+ LOGINFO('Changed [ "%s" ] == [%s, %s]', key, jdURL, jdHash)
+ newData = self.__fetchFile(jdURL)
+ if len(newData) == 0:
+ LOGERROR('Failed downloading announce file : %s', key)
+ return
+ newHash = binary_to_hex(sha256(newData))
+ if not newHash == jdHash:
+ LOGERROR('Downloaded file hash does not match!')
+ LOGERROR('Hash of downloaded data: %s', newHash)
+ return
+
+ filename = os.path.join(self.fetchDir, key+'.file')
+ with open(filename, 'wb') as f:
+ f.write(newData)
+ self.lastChange = RightNow()
+ self.fileHashMap[key] = jdHash
+
+ ##### Clean up as needed
+ if self.forceCheckFlag.isSet():
+ self.forceIsFinished.set()
+ self.forceCheckFlag.clear()
+ self.numConsecutiveExceptions = 0
+
+
+ #############################################################################
+ # I'm taking a shortcut around adding all the threading code here
+ # Simply use @AllowAsync and only call with async=True. Done.
+ @AllowAsync
+ def __runFetchLoop(self):
+ """
+ All this code runs in a separate thread (your app will freeze if
+ you don't call this with the async=True argument). It will
+ periodically check for new announce data, and update members that
+ are visible to other threads.
+
+ By default, it will check once per hour. If you call
+ self.forceCheckFlag.set()
+ It will skip the time check and force a download right now.
+ Using getAnnounceFile(forceCheck=True) will do this for you,
+ and will wait until the operation completes before returning
+ the result.
+ """
+
+ while True:
+
+ try:
+ if self.isDisabled() or self.shutdownFlag.isSet():
+ self.shutdownFlag.clear()
+ break
+
+ ##### Only check once per hour unless force flag is set
+ if not self.forceCheckFlag.isSet():
+ if RightNow()-self.lastFetch < self.fetchInterval:
+ continue
+ else:
+ LOGINFO('Forcing announce data fetch')
+ self.forceIsFinished.clear()
+
+ self.loopIsIdle.clear()
+ self.__runFetchSequence()
+
+ except:
+ self.numConsecutiveExceptions += 1
+ LOGEXCEPT('Failed download')
+ if self.numConsecutiveExceptions > 20:
+ self.setDisabled(True)
+ finally:
+ self.loopIsIdle.set()
+ time.sleep(0.5)
+
+
+
+
+
+
diff --git a/armorycolors.py b/armorycolors.py
index 64b459d53..b4ae662e6 100644
--- a/armorycolors.py
+++ b/armorycolors.py
@@ -1,6 +1,6 @@
################################################################################
# #
-# Copyright (C) 2011-2013, Armory Technologies, Inc. #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
@@ -85,6 +85,11 @@ def luminance(qcolor):
QAPP = QApplication(sys.argv)
qpal = QAPP.palette()
+# workaround for https://bugs.launchpad.net/ubuntu/+source/qt4-x11/+bug/877236
+qpal.setColor(QPalette.ToolTipBase, qpal.color(QPalette.Window))
+qpal.setColor(QPalette.ToolTipText, qpal.color(QPalette.WindowText))
+QAPP.setPalette(qpal)
+
# Some of the standard colors to be tweaked
class ArbitraryStruct: pass
Colors = ArbitraryStruct()
diff --git a/armoryd.py b/armoryd.py
index f5b251797..7dd82040d 100644
--- a/armoryd.py
+++ b/armoryd.py
@@ -1,6 +1,6 @@
################################################################################
# #
-# Copyright (C) 2011-2013, Armory Technologies, Inc. #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
@@ -24,7 +24,7 @@
# Where possible this follows conventions established by the Satoshi client.
# Does not require armory to be installed or running, this is a standalone application.
# Requires bitcoind process to be running before starting armory-daemon.
-# Requires an armory watch-only wallet to be in the same folder as the
+# Requires an armory wallet (can be watching only) to be in the same folder as the
# armory-daemon script.
# Works with testnet, use --testnet flag when starting the script.
#
@@ -40,29 +40,35 @@
# https://bitcointalk.org/index.php?topic=92496.0
#####
-from twisted.internet import reactor
-from twisted.web import server
-from txjsonrpc.web import jsonrpc
-from txjsonrpc.auth import wrapResource
-from twisted.cred.checkers import FilePasswordDB
-
-from armoryengine import *
-
import datetime
import decimal
+import json
import os
+import random
+import socket
import sys
import time
-import socket
+
+from twisted.cred.checkers import FilePasswordDB
+from twisted.internet import reactor
+from twisted.web import server
+from txjsonrpc.auth import wrapResource
+from txjsonrpc.web import jsonrpc
+
+from CppBlockUtils import SecureBinaryData
+from armoryengine.ALL import *
+from jsonrpc import ServiceProxy
+from armoryengine.Decorators import EmailOutput
+from armoryengine.ArmoryUtils import addrStr_to_hash160
+from armoryengine.PyBtcWalletRecovery import ParseWallet
+
# Some non-twisted json imports from jgarzik's code and his UniversalEncoder
-import json
-from jsonrpc import ServiceProxy
class UniversalEncoder(json.JSONEncoder):
- def default(self, obj):
- if isinstance(obj, decimal.Decimal):
- return float(obj)
- return json.JSONEncoder.default(self, obj)
+ def default(self, obj):
+ if isinstance(obj, decimal.Decimal):
+ return float(obj)
+ return json.JSONEncoder.default(self, obj)
ARMORYD_CONF_FILE = os.path.join(ARMORY_HOME_DIR, 'armoryd.conf')
@@ -83,19 +89,175 @@ class UnrecognizedCommand(Exception): pass
################################################################################
################################################################################
+class NotEnoughCoinsError(Exception): pass
+class CoinSelectError(Exception): pass
+class WalletUnlockNeeded(Exception): pass
+class InvalidBitcoinAddress(Exception): pass
+class PrivateKeyNotFound(Exception): pass
+class AddressNotInWallet(Exception): pass
+
+NOT_IMPLEMENTED = '--Not Implemented--'
+
class Armory_Json_Rpc_Server(jsonrpc.JSONRPC):
- #############################################################################
+   #############################################################################
def __init__(self, wallet):
self.wallet = wallet
+ # Used with wallet notification code
+ self.addressMetaData = {}
+
+ #############################################################################
+ def jsonrpc_backupwallet(self, backupFilePath):
+ self.wallet.backupWalletFile(backupFilePath)
+
+ #############################################################################
+ def jsonrpc_listunspent(self):
+ utxoList = self.wallet.getTxOutList('unspent')
+ result = [u.getOutPoint().serialize() for u in utxoList]
+ return result
+
+ #############################################################################
+ def jsonrpc_importprivkey(self, privkey):
+ self.wallet.importExternalAddressData(privKey=privkey)
+
+ #############################################################################
+ def jsonrpc_getrawtransaction(self, txHash, verbose=0, endianness=BIGENDIAN):
+ rawTx = None
+ cppTx = TheBDM.getTxByHash(hex_to_binary(txHash, endianness))
+ if cppTx.isInitialized():
+ txBinary = cppTx.serialize()
+ pyTx = PyTx().unserialize(txBinary)
+ rawTx = binary_to_hex(pyTx.serialize())
+ if verbose:
+ result = self.jsonrpc_decoderawtransaction(rawTx)
+ result['hex'] = rawTx
+ else:
+ result = rawTx
+ else:
+ LOGERROR('Tx hash not recognized by TheBDM: %s' % txHash)
+ result = None
+
+ return result
+
+ #############################################################################
+ def jsonrpc_gettxout(self, txHash, n):
+ txOut = None
+ cppTx = TheBDM.getTxByHash(hex_to_binary(txHash, BIGENDIAN))
+ if cppTx.isInitialized():
+ txBinary = cppTx.serialize()
+ pyTx = PyTx().unserialize(txBinary)
+ if n < len(pyTx.outputs):
+ txOut = pyTx.outputs[n]
+ else:
+ LOGERROR('Tx no output #: %s' % n)
+ else:
+ LOGERROR('Tx hash not recognized by TheBDM: %s' % binary_to_hex(txHash))
+ return txOut
+
+ #############################################################################
+ def jsonrpc_encryptwallet(self, passphrase):
+ if self.wallet.isLocked:
+ raise WalletUnlockNeeded
+ self.wallet.changeWalletEncryption( securePassphrase=SecureBinaryData(passphrase) )
+ self.wallet.lock()
+
+ #############################################################################
+ def jsonrpc_unlockwallet(self, passphrase, timeout):
+ self.wallet.unlock( securePassphrase=SecureBinaryData(passphrase),
+ tempKeyLifetime=timeout)
+
+
+ #############################################################################
+ def getScriptPubKey(self, txOut):
+ addrList = []
+ scriptType = getTxOutScriptType(txOut.binScript)
+ if scriptType in CPP_TXOUT_STDSINGLESIG:
+ M = 1
+ addrList = [script_to_addrStr(txOut.binScript)]
+ elif scriptType == CPP_TXOUT_P2SH:
+ M = -1
+ addrList = [script_to_addrStr(txOut.binScript)]
+ elif scriptType==CPP_TXOUT_MULTISIG:
+ M, N, addr160List, pub65List = getMultisigScriptInfo(txOut.binScript)
+ addrList = [hash160_to_addrStr(a160) for a160 in addr160List]
+ elif scriptType == CPP_TXOUT_NONSTANDARD:
+ M = -1
+
+ opStringList = convertScriptToOpStrings(txOut.binScript)
+ return { 'asm' : ' '.join(opStringList),
+ 'hex' : binary_to_hex(txOut.binScript),
+ 'reqSigs' : M,
+ 'type' : CPP_TXOUT_SCRIPT_NAMES[scriptType],
+ 'addresses' : addrList }
+
+ #############################################################################
+ def jsonrpc_decoderawtransaction(self, hexString):
+ pyTx = PyTx().unserialize(hex_to_binary(hexString))
+
+ #####
+ # Accumulate TxIn info
+ vinList = []
+ for txin in pyTx.inputs:
+ prevHash = txin.outpoint.txHash
+ scrType = getTxInScriptType(txin)
+         # ACR:  What is asm, and why is it basically just binScript?
+ oplist = convertScriptToOpStrings(txin.binScript)
+ scriptSigDict = { 'asm' : ' '.join(oplist),
+ 'hex' : binary_to_hex(txin.binScript) }
+
+ if not scrType == CPP_TXIN_COINBASE:
+ vinList.append( { 'txid' : binary_to_hex(prevHash, BIGENDIAN),
+ 'vout' : txin.outpoint.txOutIndex,
+ 'scriptSig' : scriptSigDict,
+ 'sequence' : txin.intSeq})
+ else:
+ vinList.append( { 'coinbase' : binary_to_hex(txin.binScript),
+ 'sequence' : txin.intSeq })
+
+ #####
+ # Accumulate TxOut info
+ voutList = []
+ for n,txout in enumerate(pyTx.outputs):
+ voutList.append( { 'value' : AmountToJSON(txout.value),
+ 'n' : n,
+ 'scriptPubKey' : self.getScriptPubKey(txout) } )
+
+
+ #####
+ # Accumulate all the data to return
+ result = { 'txid' : pyTx.getHashHex(BIGENDIAN),
+ 'version' : pyTx.version,
+ 'locktime' : pyTx.lockTime,
+ 'vin' : vinList,
+ 'vout' : voutList }
+
+ return result
+
#############################################################################
def jsonrpc_getnewaddress(self):
addr = self.wallet.getNextUnusedAddress()
return addr.getAddrStr()
+ #############################################################################
+ def jsonrpc_dumpprivkey(self, addr58):
+ # Cannot dump the private key for a locked wallet
+ if self.wallet.isLocked:
+ raise WalletUnlockNeeded
+ # The first byte must be the correct net byte, and the
+ # last 4 bytes must be the correct checksum
+ if not checkAddrStrValid(addr58):
+ raise InvalidBitcoinAddress
+
+ atype, addr160 = addrStr_to_hash160(addr58, False)
+
+ pyBtcAddress = self.wallet.getAddrByHash160(addr160)
+ if pyBtcAddress == None:
+ raise PrivateKeyNotFound
+ return pyBtcAddress.serializePlainPrivateKey()
+
#############################################################################
def jsonrpc_getwalletinfo(self):
wltInfo = { \
@@ -108,7 +270,6 @@ def jsonrpc_getwalletinfo(self):
}
return wltInfo
-
#############################################################################
def jsonrpc_getbalance(self, baltype='spendable'):
if not baltype in ['spendable','spend', 'unconf', 'unconfirmed', \
@@ -123,7 +284,8 @@ def jsonrpc_getreceivedbyaddress(self, address):
if CLI_OPTIONS.offline:
raise ValueError('Cannot get received amount when offline')
# Only gets correct amount for addresses in the wallet, otherwise 0
- addr160 = addrStr_to_hash160(address)
+ atype, addr160 = addrStr_to_hash160(address, False)
+
txs = self.wallet.getAddrTxLedger(addr160)
balance = sum([x.getValue() for x in txs if x.getValue() > 0])
return AmountToJSON(balance)
@@ -132,21 +294,21 @@ def jsonrpc_getreceivedbyaddress(self, address):
def jsonrpc_sendtoaddress(self, bitcoinaddress, amount):
if CLI_OPTIONS.offline:
raise ValueError('Cannot create transactions when offline')
- addr160 = addrStr_to_hash160(bitcoinaddress)
+ scraddr = addrStr_to_scrAddr(bitcoinaddress)
amtCoin = JSONtoAmount(amount)
- return self.create_unsigned_transaction([[addr160, amtCoin]])
+ return self.create_unsigned_transaction([[scraddr, amtCoin]])
#############################################################################
def jsonrpc_sendmany(self, *args):
if CLI_OPTIONS.offline:
raise ValueError('Cannot create transactions when offline')
- recipvalpairs = []
+ scraddrValuePairs = []
for a in args:
r,v = a.split(':')
- recipvalpairs.append([addrStr_to_hash160(r), JSONtoAmount(v)])
+ scraddrValuePairs.append([addrStr_to_scrAddr(r), JSONtoAmount(v)])
- return self.create_unsigned_transaction(recipvalpairs)
+ return self.create_unsigned_transaction(scraddrValuePairs)
#############################################################################
@@ -173,8 +335,9 @@ def jsonrpc_getledger(self, tx_count=10, from_tx=0, simple=False):
if not cppTx.isInitialized():
LOGERROR('Tx hash not recognized by TheBDM: %s' % txHashHex)
- cppHead = cppTx.getHeaderPtr()
- if not cppHead.isInitialized:
+ #cppHead = cppTx.getHeaderPtr()
+ cppHead = TheBDM.getHeaderPtrForTx(cppTx)
+ if not cppHead.isInitialized():
LOGERROR('Header pointer is not available!')
headHashBin = ''
headHashHex = ''
@@ -188,7 +351,8 @@ def jsonrpc_getledger(self, tx_count=10, from_tx=0, simple=False):
netCoins = le.getValue()
feeCoins = getFeeForTx(txHashBin)
- allRecips = [cppTx.getTxOut(i).getRecipientAddr() for i in range(cppTx.getNumTxOut())]
+ scrAddrs = [cppTx.getTxOutCopy(i).getScrAddressStr() for i in range(cppTx.getNumTxOut())]
+ allRecips = [CheckHash160(r) for r in scrAddrs]
first160 = ''
if cppTx.getNumTxOut()==1:
first160 = allRecips[0]
@@ -239,8 +403,8 @@ def jsonrpc_getledger(self, tx_count=10, from_tx=0, simple=False):
myinputs, otherinputs = [],[]
for iin in range(cppTx.getNumTxIn()):
- sender = TheBDM.getSenderAddr20(cppTx.getTxIn(iin))
- val = TheBDM.getSentValue(cppTx.getTxIn(iin))
+ sender = CheckHash160(TheBDM.getSenderScrAddr(cppTx.getTxInCopy(iin)))
+ val = TheBDM.getSentValue(cppTx.getTxInCopy(iin))
addTo = (myinputs if self.wallet.hasAddr(sender) else otherinputs)
addTo.append( {'address': hash160_to_addrStr(sender), \
'amount': AmountToJSON(val)} )
@@ -248,8 +412,8 @@ def jsonrpc_getledger(self, tx_count=10, from_tx=0, simple=False):
myoutputs, otheroutputs = [], []
for iout in range(cppTx.getNumTxOut()):
- recip = cppTx.getTxOut(iout).getRecipientAddr();
- val = cppTx.getTxOut(iout).getValue();
+ recip = CheckHash160(cppTx.getTxOutCopy(iout).getScrAddressStr())
+ val = cppTx.getTxOutCopy(iout).getValue();
addTo = (myoutputs if self.wallet.hasAddr(recip) else otheroutputs)
addTo.append( {'address': hash160_to_addrStr(recip), \
'amount': AmountToJSON(val)} )
@@ -299,7 +463,7 @@ def jsonrpc_listtransactions(self, tx_count=10, from_tx=0):
txSet = set([])
- for i in range(sz):
+ for i in range(lower,upper):
le = ledgerEntries[i]
txHashBin = le.getTxHash()
@@ -313,7 +477,8 @@ def jsonrpc_listtransactions(self, tx_count=10, from_tx=0):
if not cppTx.isInitialized():
LOGERROR('Tx hash not recognized by TheBDM: %s' % txHashHex)
- cppHead = cppTx.getHeaderPtr()
+ #cppHead = cppTx.getHeaderPtr()
+ cppHead = TheBDM.getHeaderPtrForTx(cppTx)
if not cppHead.isInitialized:
LOGERROR('Header pointer is not available!')
@@ -331,15 +496,15 @@ def jsonrpc_listtransactions(self, tx_count=10, from_tx=0):
# are receives
recipVals = []
for iout in range(cppTx.getNumTxOut()):
- recip = cppTx.getTxOut(iout).getRecipientAddr()
- val = cppTx.getTxOut(iout).getValue()
+ recip = CheckHash160(cppTx.getTxOutCopy(iout).getScrAddressStr())
+ val = cppTx.getTxOutCopy(iout).getValue()
recipVals.append([recip,val])
if cppTx.getNumTxOut()==1:
changeAddr160 = ""
- targAddr160 = cppTx.getTxOut(0).getRecipientAddr()
+ targAddr160 = CheckHash160(cppTx.getTxOutCopy(0).getScrAddressStr())
elif isToSelf:
selfamt,changeIdx = determineSentToSelfAmt(le, self.wallet)
if changeIdx==-1:
@@ -439,11 +604,7 @@ def jsonrpc_listtransactions(self, tx_count=10, from_tx=0):
final_tx_list.append(tx_info)
return final_tx_list
-
-
-
-
-
+
#############################################################################
def jsonrpc_getinfo(self):
isReady = TheBDM.getBDMState() == 'BlockchainReady'
@@ -518,7 +679,7 @@ def jsonrpc_gettransaction(self, txHash):
inputvalues = []
outputvalues = []
for i in range(tx.getNumTxIn()):
- op = tx.getTxIn(i).getOutPoint()
+ op = tx.getTxInCopy(i).getOutPoint()
prevtx = TheBDM.getTxByHash(op.getTxHash())
if not prevtx.isInitialized():
haveAllInputs = False
@@ -529,9 +690,9 @@ def jsonrpc_gettransaction(self, txHash):
'fromtxindex': op.getTxOutIndex()})
else:
- txout = prevtx.getTxOut(op.getTxOutIndex())
+ txout = prevtx.getTxOutCopy(op.getTxOutIndex())
inputvalues.append(txout.getValue())
- recip160 = txout.getRecipientAddr()
+ recip160 = CheckHash160(txout.getScrAddressStr())
txindata.append( { 'address': hash160_to_addrStr(recip160),
'value': AmountToJSON(txout.getValue()),
'ismine': self.wallet.hasAddr(recip160),
@@ -540,10 +701,11 @@ def jsonrpc_gettransaction(self, txHash):
txoutdata = []
for i in range(tx.getNumTxOut()):
- txout = tx.getTxOut(i)
+ txout = tx.getTxOutCopy(i)
+ a160 = CheckHash160(txout.getScrAddressStr())
txoutdata.append( { 'value': AmountToJSON(txout.getValue()),
- 'ismine': self.wallet.hasAddr(txout.getRecipientAddr()),
- 'address': hash160_to_addrStr(txout.getRecipientAddr())})
+ 'ismine': self.wallet.hasAddr(a160),
+ 'address': hash160_to_addrStr(a160)})
outputvalues.append(txout.getValue())
fee = sum(inputvalues)-sum(outputvalues)
@@ -585,11 +747,11 @@ def jsonrpc_gettransaction(self, txHash):
#############################################################################
# https://bitcointalk.org/index.php?topic=92496.msg1126310#msg1126310
- def create_unsigned_transaction(self, recipValPairs):
+ def create_unsigned_transaction(self, scraddrValuePairs):
# Get unspent TxOutList and select the coins
#addr160_recipient = addrStr_to_hash160(bitcoinaddress_str)
- totalSend = long( sum([rv[1] for rv in recipValPairs]) )
+ totalSend = long( sum([rv[1] for rv in scraddrValuePairs]) )
fee = 0
spendBal = self.wallet.getBalance('Spendable')
@@ -609,16 +771,73 @@ def create_unsigned_transaction(self, recipValPairs):
totalSelected = sum([u.getValue() for u in utxoSelect])
totalChange = totalSelected - (totalSend + fee)
- outputPairs = recipValPairs
+ outputPairs = scraddrValuePairs[:]
if totalChange > 0:
- outputPairs.append( [self.wallet.getNextUnusedAddress().getAddr160(), totalChange] )
+ nextAddr = self.wallet.getNextUnusedAddress().getAddrStr()
+ outputPairs.append( [addrStr_to_scrAddr(nextAddr), totalChange] )
random.shuffle(outputPairs)
txdp = PyTxDistProposal().createFromTxOutSelection(utxoSelect, outputPairs)
return txdp.serializeAscii()
-
+ ################################################################################
+ # For each transaction in a block that triggers a notification:
+ # List the inputs, and output, indicate the one we are watching, displays balance data
+ # Also, display meta data associated with the address.
+ #
+ # Example usage:
+ # started the daemon with these arguments: --testnet armory_286jcNJRc_.wallet
+ # Then I called the daemon with: --testnet watchwallet
+ def jsonrpc_watchwallet(self, send_from=None, password=None, send_to=None, subject=None):
+
+ @EmailOutput(send_from, password, [send_to], subject)
+ def reportTxFromAddrInNewBlock(pyHeader, pyTxList):
+ result = ''
+ for pyTx in pyTxList:
+ for pyTxIn in pyTx.inputs:
+ sendingAddrStr = TxInExtractAddrStrIfAvail(pyTxIn)
+ if len(sendingAddrStr) > 0:
+ sendingAddrHash160 = addrStr_to_hash160(sendingAddrStr, False)[1]
+ if self.wallet.addrMap.has_key(sendingAddrHash160):
+ sendingAddr = self.wallet.addrMap[sendingAddrHash160]
+ result = ''.join([result, '\n', sendingAddr.toString(), '\n'])
+ # print the meta data
+ if sendingAddrStr in self.addressMetaData:
+ result = ''.join([result, "\nMeta Data: ", str(self.addressMetaData[sendingAddrStr]), '\n'])
+ result = ''.join([result, '\n', pyTx.toString()])
+ return result
+
+ # TODO: Need stop assuming that this is the only method using newBlockFunctions
+ # Remove existing newBlockFunction to allow user to change the email args
+ rpc_server.newBlockFunctions = []
+ rpc_server.newBlockFunctions.append(reportTxFromAddrInNewBlock)
+
+ ################################################################################
+ # Associate meta data to an address or addresses
+ # Example input: "{\"mzAtXhy3Z6SLd7rAwNJrL17e8mQkjDVDXh\": {\"chain\": 5,
+ # \"index\": 2}, \"mkF5L93F5HLhLmQagX26TdXcvPGHvfjoTM\": {\"CrazyField\": \"what\",
+ # \"1\": 1, \"2\": 2}}"
+ def jsonrpc_setaddressmetadata(self, newAddressMetaData):
+ # Loop once to check the addresses
+ # Don't add any meta data if one of the addresses wrong.
+ for addr in newAddressMetaData.keys():
+ if not checkAddrStrValid(addr):
+ raise InvalidBitcoinAddress
+ if not self.wallet.addrMap.has_key(addrStr_to_hash160(addr, False)[1]):
+ raise AddressNotInWallet
+ self.addressMetaData.update(newAddressMetaData)
+
+ ################################################################################
+ # Clear the meta data
+ def jsonrpc_clearaddressmetadata(self):
+ self.addressMetaData = {}
+
+ ################################################################################
+ # get the meta data
+ def jsonrpc_getaddressmetadata(self):
+ return self.addressMetaData
+
################################################################################
################################################################################
class Armory_Daemon(object):
@@ -629,6 +848,11 @@ def __init__(self):
# Check if armoryd is already running, bail if it is
self.checkForAlreadyRunning()
+ self.lock = threading.Lock()
+      self.lastChecked = datetime.datetime.now()
+
+ #check wallet consistency every hour
+ self.checkStep = 3600
print ''
print '*'*80
@@ -690,10 +914,19 @@ def set_auth(self, resource):
#############################################################################
def start(self):
+ #run a wallet consistency check before starting the BDM
+ self.checkWallet()
+
+ #try to grab checkWallet lock to block start() until the check is over
+ self.lock.acquire()
+ self.lock.release()
+
+ # This is not a UI so no need to worry about the main thread being blocked.
+ # Any UI that uses this Daemon can put the call to the Daemon on it's own thread.
+ TheBDM.setBlocking(True)
LOGINFO('Server started...')
if(not TheBDM.getBDMState()=='Offline'):
TheBDM.registerWallet(self.wallet)
- TheBDM.setBlocking(False)
TheBDM.setOnlineMode(True)
LOGINFO('Blockchain loading')
@@ -715,12 +948,12 @@ def start(self):
# This is CONNECT call for armoryd to talk to bitcoind
LOGINFO('Set up connection to bitcoind')
self.NetworkingFactory = ArmoryClientFactory( \
+ TheBDM,
func_loseConnect = self.showOfflineMsg, \
func_madeConnect = self.showOnlineMsg, \
func_newTx = self.execOnNewTx, \
func_newBlock = self.execOnNewBlock)
reactor.connectTCP('127.0.0.1', BITCOIN_PORT, self.NetworkingFactory)
-
reactor.run()
@@ -739,11 +972,15 @@ def checkForAlreadyRunning(self):
if CLI_ARGS:
proxyobj = ServiceProxy("http://%s:%s@127.0.0.1:%d" % (usr,pwd,ARMORY_RPC_PORT))
- extraArgs = [] if len(CLI_ARGS)==1 else CLI_ARGS[1:]
try:
#if not proxyobj.__hasattr__(CLI_ARGS[0]):
#raise UnrecognizedCommand, 'No json command %s'%CLI_ARGS[0]
-
+ extraArgs = []
+ for arg in ([] if len(CLI_ARGS)==1 else CLI_ARGS[1:]):
+ if arg[0] == '{':
+ extraArgs.append(json.loads(arg))
+ else:
+ extraArgs.append(arg)
result = proxyobj.__getattr__(CLI_ARGS[0])(*extraArgs)
print json.dumps(result,
indent=4, \
@@ -775,7 +1012,7 @@ def execOnNewTx(self, pytxObj):
TheBDM.addNewZeroConfTx(pytxObj.serialize(), long(RightNow()), True)
TheBDM.rescanWalletZeroConf(self.wallet.cppWallet)
- # Add anything else you'd like to do on a new block
+ # Add anything else you'd like to do on a new transaction
#
for txFunc in self.newTxFunctions:
txFunc(pytxObj)
@@ -826,6 +1063,22 @@ def checkMemoryPoolCorruption(self, mempoolname):
PyTx().unserialize(binunpacker)
except:
os.remove(mempoolname);
+
+ #############################################################################
+ @AllowAsync
+ def checkWallet(self):
+ if self.lock.acquire(False) == False: return
+ wltStatus = ParseWallet(None, self.wallet, 5, None)
+ if wltStatus != 0:
+ print 'Wallet consistency check failed in wallet %s!!!' \
+ % (self.wallet.uniqueIDB58)
+ print 'Aborting...'
+
+ quit()
+ else:
+         self.lastChecked = datetime.datetime.now()
+ self.lock.release()
+
#############################################################################
def Heartbeat(self, nextBeatSec=1):
@@ -836,7 +1089,14 @@ def Heartbeat(self, nextBeatSec=1):
"""
# Check for new blocks in the blk000X.dat file
if TheBDM.getBDMState()=='BlockchainReady':
-
+
+ #check wallet every checkStep seconds
+ nextCheck = self.lastChecked + \
+ datetime.timedelta(seconds=self.checkStep)
+         if datetime.datetime.now() >= nextCheck:
+ self.checkWallet()
+
+ # Check for new blocks in the blk000X.dat file
prevTopBlock = TheBDM.getTopBlockHeight()
newBlks = TheBDM.readBlkFileUpdate()
if newBlks>0:
@@ -861,9 +1121,11 @@ def Heartbeat(self, nextBeatSec=1):
# blocks on the main chain, not the invalid ones
for blknum in range(prevTopBlock+1, self.latestBlockNum+1):
cppHeader = TheBDM.getHeaderByHeight(blknum)
- txHashToPy = lambda h: PyTx().unserialize(TheBDM.getTxByHash(h).serialize())
- pyHeader = PyBlockHeader().unserialize(header.serialize())
- pyTxList = [txHashToPy(hsh) for hsh in header.getTxHashList()]
+ pyHeader = PyBlockHeader().unserialize(cppHeader.serialize())
+
+ cppBlock = TheBDM.getMainBlockFromDB(blknum)
+ pyTxList = [PyTx().unserialize(cppBlock.getSerializedTx(i)) for
+ i in range(cppBlock.getNumTx())]
for blockFunc in self.newBlockFunctions:
blockFunc(pyHeader, pyTxList)
@@ -892,9 +1154,7 @@ def default(self, obj):
-#if __name__ == "__main__":
-if True:
-
+if __name__ == "__main__":
rpc_server = Armory_Daemon()
rpc_server.start()
diff --git a/armoryengine.py b/armoryengine.py
deleted file mode 100644
index f3f5969ac..000000000
--- a/armoryengine.py
+++ /dev/null
@@ -1,13830 +0,0 @@
-################################################################################
-# #
-# Copyright (C) 2011-2013, Armory Technologies, Inc. #
-# Distributed under the GNU Affero General Public License (AGPL v3) #
-# See LICENSE or http://www.gnu.org/licenses/agpl.html #
-# #
-################################################################################
-
-# Version Numbers
-BTCARMORY_VERSION = (0, 90, 0, 0) # (Major, Minor, Bugfix, AutoIncrement)
-PYBTCWALLET_VERSION = (1, 35, 0, 0) # (Major, Minor, Bugfix, AutoIncrement)
-
-ARMORY_DONATION_ADDR = '1ArmoryXcfq7TnCSuZa9fQjRYwJ4bkRKfv'
-ARMORY_DONATION_PUBKEY = ( '04'
- '11d14f8498d11c33d08b0cd7b312fb2e6fc9aebd479f8e9ab62b5333b2c395c5'
- 'f7437cab5633b5894c4a5c2132716bc36b7571cbe492a7222442b75df75b9a84')
-ARMORY_INFO_SIGN_ADDR = '1NWvhByxfTXPYNT4zMBmEY3VL8QJQtQoei'
-ARMORY_INFO_SIGN_PUBLICKEY = ('04'
- 'af4abc4b24ef57547dd13a1110e331645f2ad2b99dfe1189abb40a5b24e4ebd8'
- 'de0c1c372cc46bbee0ce3d1d49312e416a1fa9c7bb3e32a7eb3867d1c6d1f715')
-SATOSHI_PUBLIC_KEY = ( '04'
- 'fc9702847840aaf195de8442ebecedf5b095cdbb9bc716bda9110971b28a49e0'
- 'ead8564ff0db22209e0374782c093bb899692d524e9d6a6956e7c5ecbcd68284')
-
-
-
-
-import copy
-import hashlib
-import random
-import time
-import os
-import string
-import sys
-import stat
-import shutil
-import math
-import logging
-import logging.handlers
-import locale
-import ast
-import traceback
-import threading
-import signal
-import inspect
-import multiprocessing
-import psutil
-from struct import pack, unpack
-from datetime import datetime
-
-# In Windows with py2exe, we have a problem unless we PIPE all streams
-from subprocess import Popen, PIPE
-
-from sys import argv
-
-import optparse
-parser = optparse.OptionParser(usage="%prog [options]\n")
-parser.add_option("--settings", dest="settingsPath",default='DEFAULT', type="str", help="load Armory with a specific settings file")
-parser.add_option("--datadir", dest="datadir", default='DEFAULT', type="str", help="Change the directory that Armory calls home")
-parser.add_option("--satoshi-datadir", dest="satoshiHome", default='DEFAULT', type='str', help="The Bitcoin-Qt/bitcoind home directory")
-parser.add_option("--satoshi-port", dest="satoshiPort", default='DEFAULT', type="str", help="For Bitcoin-Qt instances operating on a non-standard port")
-parser.add_option("--dbdir", dest="leveldbDir", default='DEFAULT', type='str', help="Location to store blocks database (defaults to --datadir)")
-parser.add_option("--rpcport", dest="rpcport", default='DEFAULT', type="str", help="RPC port for running armoryd.py")
-parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the testnet protocol")
-parser.add_option("--offline", dest="offline", default=False, action="store_true", help="Force Armory to run in offline mode")
-parser.add_option("--nettimeout", dest="nettimeout", default=2, type="int", help="Timeout for detecting internet connection at startup")
-parser.add_option("--interport", dest="interport", default=-1, type="int", help="Port for inter-process communication between Armory instances")
-parser.add_option("--debug", dest="doDebug", default=False, action="store_true", help="Increase amount of debugging output")
-parser.add_option("--nologging", dest="logDisable", default=False, action="store_true", help="Disable all logging")
-parser.add_option("--netlog", dest="netlog", default=False, action="store_true", help="Log networking messages sent and received by Armory")
-parser.add_option("--logfile", dest="logFile", default='DEFAULT', type='str', help="Specify a non-default location to send logging information")
-parser.add_option("--mtdebug", dest="mtdebug", default=False, action="store_true", help="Log multi-threaded call sequences")
-parser.add_option("--skip-online-check", dest="forceOnline", default=False, action="store_true", help="Go into online mode, even if internet connection isn't detected")
-parser.add_option("--skip-version-check", dest="skipVerCheck", default=False, action="store_true", help="Do not contact bitcoinarmory.com to check for new versions")
-parser.add_option("--keypool", dest="keypool", default=100, type="int", help="Default number of addresses to lookahead in Armory wallets")
-parser.add_option("--rebuild", dest="rebuild", default=False, action="store_true", help="Rebuild blockchain database and rescan")
-parser.add_option("--rescan", dest="rescan", default=False, action="store_true", help="Rescan existing blockchain DB")
-parser.add_option("--maxfiles", dest="maxOpenFiles",default=0, type="int", help="Set maximum allowed open files for LevelDB databases")
-
-# These are arguments passed by running unit-tests that need to be handled
-parser.add_option("--port", dest="port", default=None, type="int", help="Unit Test Argument - Do not consume")
-parser.add_option("--verbosity", dest="verbosity", default=None, type="int", help="Unit Test Argument - Do not consume")
-parser.add_option("--coverage_output_dir", dest="coverageOutputDir", default=None, type="str", help="Unit Test Argument - Do not consume")
-parser.add_option("--coverage_include", dest="coverageInclude", default=None, type="str", help="Unit Test Argument - Do not consume")
-
-################################################################################
-# We need to have some methods for casting ASCII<->Unicode<->Preferred
-DEFAULT_ENCODING = 'utf-8'
-
-def isASCII(theStr):
- try:
- theStr.decode('ascii')
- return True
- except UnicodeEncodeError:
- return False
- except UnicodeDecodeError:
- return False
- except:
- LOGEXCEPT('What was passed to this function? %s', theStr)
- return False
-
-
-def toBytes(theStr, theEncoding=DEFAULT_ENCODING):
- if isinstance(theStr, unicode):
- return theStr.encode(theEncoding)
- elif isinstance(theStr, str):
- return theStr
- else:
- LOGERROR('toBytes() not been defined for input: %s', str(type(theStr)))
-
-
-def toUnicode(theStr, theEncoding=DEFAULT_ENCODING):
- if isinstance(theStr, unicode):
- return theStr
- elif isinstance(theStr, str):
- return unicode(theStr, theEncoding)
- else:
- LOGERROR('toUnicode() not been defined for input: %s', str(type(theStr)))
-
-
-def toPreferred(theStr):
- return toUnicode(theStr).encode(locale.getpreferredencoding())
-
-
-def lenBytes(theStr, theEncoding=DEFAULT_ENCODING):
- return len(toBytes(theStr, theEncoding))
-################################################################################
-
-
-
-(CLI_OPTIONS, CLI_ARGS) = parser.parse_args()
-
-
-# Use CLI args to determine testnet or not
-USE_TESTNET = CLI_OPTIONS.testnet
-#USE_TESTNET = True
-
-
-# Set default port for inter-process communication
-if CLI_OPTIONS.interport < 0:
- CLI_OPTIONS.interport = 8223 + (1 if USE_TESTNET else 0)
-
-
-
-
-def getVersionString(vquad, numPieces=4):
- vstr = '%d.%02d' % vquad[:2]
- if (vquad[2] > 0 or vquad[3] > 0) and numPieces>2:
- vstr += '.%d' % vquad[2]
- if vquad[3] > 0 and numPieces>3:
- vstr += '.%d' % vquad[3]
- return vstr
-
-def getVersionInt(vquad, numPieces=4):
- vint = int(vquad[0] * 1e7)
- vint += int(vquad[1] * 1e5)
- if numPieces>2:
- vint += int(vquad[2] * 1e3)
- if numPieces>3:
- vint += int(vquad[3])
- return vint
-
-def readVersionString(verStr):
- verList = [int(piece) for piece in verStr.split('.')]
- while len(verList)<4:
- verList.append(0)
- return tuple(verList)
-
-def readVersionInt(verInt):
- verStr = str(verInt).rjust(10,'0')
- verList = []
- verList.append( int(verStr[ -3:]) )
- verList.append( int(verStr[ -5:-3 ]) )
- verList.append( int(verStr[ -7:-5 ]) )
- verList.append( int(verStr[:-7 ]) )
- return tuple(verList[::-1])
-
-# Get the host operating system
-import platform
-opsys = platform.system()
-OS_WINDOWS = 'win32' in opsys.lower() or 'windows' in opsys.lower()
-OS_LINUX = 'nix' in opsys.lower() or 'nux' in opsys.lower()
-OS_MACOSX = 'darwin' in opsys.lower() or 'osx' in opsys.lower()
-
-# Figure out the default directories for Satoshi client, and BicoinArmory
-OS_NAME = ''
-OS_VARIANT = ''
-USER_HOME_DIR = ''
-BTC_HOME_DIR = ''
-ARMORY_HOME_DIR = ''
-LEVELDB_DIR = ''
-SUBDIR = 'testnet3' if USE_TESTNET else ''
-if OS_WINDOWS:
- OS_NAME = 'Windows'
- OS_VARIANT = platform.win32_ver()
- USER_HOME_DIR = os.getenv('APPDATA')
- BTC_HOME_DIR = os.path.join(USER_HOME_DIR, 'Bitcoin', SUBDIR)
- ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, 'Armory', SUBDIR)
- BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
-elif OS_LINUX:
- OS_NAME = 'Linux'
- OS_VARIANT = platform.linux_distribution()
- USER_HOME_DIR = os.getenv('HOME')
- BTC_HOME_DIR = os.path.join(USER_HOME_DIR, '.bitcoin', SUBDIR)
- ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, '.armory', SUBDIR)
- BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
-elif OS_MACOSX:
- platform.mac_ver()
- OS_NAME = 'MacOSX'
- OS_VARIANT = platform.mac_ver()
- USER_HOME_DIR = os.path.expanduser('~/Library/Application Support')
- BTC_HOME_DIR = os.path.join(USER_HOME_DIR, 'Bitcoin', SUBDIR)
- ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, 'Armory', SUBDIR)
- BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
-else:
- print '***Unknown operating system!'
- print '***Cannot determine default directory locations'
-
-
-# Allow user to override default bitcoin-qt/bitcoind home directory
-if not CLI_OPTIONS.satoshiHome.lower()=='default':
- success = True
- if USE_TESTNET:
- testnetTry = os.path.join(CLI_OPTIONS.satoshiHome, 'testnet3')
- if os.path.exists(testnetTry):
- CLI_OPTIONS.satoshiHome = testnetTry
-
- if not os.path.exists(CLI_OPTIONS.satoshiHome):
- print 'Directory "%s" does not exist! Using default!' % \
- CLI_OPTIONS.satoshiHome
- else:
- BTC_HOME_DIR = CLI_OPTIONS.satoshiHome
-
-
-
-# Allow user to override default Armory home directory
-if not CLI_OPTIONS.datadir.lower()=='default':
- if not os.path.exists(CLI_OPTIONS.datadir):
- print 'Directory "%s" does not exist! Using default!' % \
- CLI_OPTIONS.datadir
- else:
- ARMORY_HOME_DIR = CLI_OPTIONS.datadir
-
-# Same for the directory that holds the LevelDB databases
-LEVELDB_DIR = os.path.join(ARMORY_HOME_DIR, 'databases')
-if not CLI_OPTIONS.leveldbDir.lower()=='default':
- if not os.path.exists(CLI_OPTIONS.leveldbDir):
- print 'Directory "%s" does not exist! Using default!' % \
- CLI_OPTIONS.leveldbDir
- os.makedirs(CLI_OPTIONS.leveldbDir)
- else:
- LEVELDB_DIR = CLI_OPTIONS.leveldbDir
-
-
-
-# Change the settings file to use
-#BITCOIND_PATH = None
-#if not CLI_OPTIONS.bitcoindPath.lower()=='default':
- #BITCOIND_PATH = CLI_OPTIONS.bitcoindPath
-
-# Change the settings file to use
-if CLI_OPTIONS.settingsPath.lower()=='default':
- CLI_OPTIONS.settingsPath = os.path.join(ARMORY_HOME_DIR, 'ArmorySettings.txt')
-
-# Change the log file to use
-ARMORY_LOG_FILE = os.path.join(ARMORY_HOME_DIR, 'armorylog.txt')
-ARMCPP_LOG_FILE = os.path.join(ARMORY_HOME_DIR, 'armorycpplog.txt')
-if sys.argv[0] in ['ArmoryQt.py', 'ArmoryQt.exe', 'Armory.exe']:
- ARMORY_LOG_FILElogFile = os.path.join(ARMORY_HOME_DIR, 'armorylog.txt')
-else:
- basename = os.path.basename(sys.argv[0])
- CLI_OPTIONS.logFile = os.path.join(ARMORY_HOME_DIR, '%s.log.txt' % basename)
-
-SETTINGS_PATH = CLI_OPTIONS.settingsPath
-
-
-
-# If this is the first Armory has been run, create directories
-if ARMORY_HOME_DIR and not os.path.exists(ARMORY_HOME_DIR):
- os.makedirs(ARMORY_HOME_DIR)
-
-
-if not os.path.exists(LEVELDB_DIR):
- os.makedirs(LEVELDB_DIR)
-
-
-if sys.argv[0]=='ArmoryQt.py':
- print '********************************************************************************'
- print 'Loading Armory Engine:'
- print ' Armory Version: ', getVersionString(BTCARMORY_VERSION)
- print ' PyBtcWallet Version:', getVersionString(PYBTCWALLET_VERSION)
- print 'Detected Operating system:', OS_NAME
- print ' OS Variant :', OS_VARIANT
- print ' User home-directory :', USER_HOME_DIR
- print ' Satoshi BTC directory :', BTC_HOME_DIR
- print ' Armory home dir :', ARMORY_HOME_DIR
- print ' LevelDB directory :', LEVELDB_DIR
- print ' Armory settings file :', SETTINGS_PATH
- print ' Armory log file :', ARMORY_LOG_FILE
-
-
-
-class UnserializeError(Exception): pass
-class BadAddressError(Exception): pass
-class VerifyScriptError(Exception): pass
-class FileExistsError(Exception): pass
-class ECDSA_Error(Exception): pass
-class PackerError(Exception): pass
-class UnpackerError(Exception): pass
-class UnitializedBlockDataError(Exception): pass
-class WalletLockError(Exception): pass
-class SignatureError(Exception): pass
-class KeyDataError(Exception): pass
-class ChecksumError(Exception): pass
-class WalletAddressError(Exception): pass
-class PassphraseError(Exception): pass
-class EncryptionError(Exception): pass
-class InterruptTestError(Exception): pass
-class NetworkIDError(Exception): pass
-class WalletExistsError(Exception): pass
-class ConnectionError(Exception): pass
-class BlockchainUnavailableError(Exception): pass
-class InvalidHashError(Exception): pass
-class BadURIError(Exception): pass
-class CompressedKeyError(Exception): pass
-class TooMuchPrecisionError(Exception): pass
-class NegativeValueError(Exception): pass
-class FiniteFieldError(Exception): pass
-class BitcoindError(Exception): pass
-class ShouldNotGetHereError(Exception): pass
-class BadInputError(Exception): pass
-
-
-
-
-##### MAIN NETWORK IS DEFAULT #####
-if not USE_TESTNET:
- # TODO: The testnet genesis tx hash can't be the same...?
- BITCOIN_PORT = 8333
- BITCOIN_RPC_PORT = 8332
- ARMORY_RPC_PORT = 8225
- MAGIC_BYTES = '\xf9\xbe\xb4\xd9'
- GENESIS_BLOCK_HASH_HEX = '6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000'
- GENESIS_BLOCK_HASH = 'o\xe2\x8c\n\xb6\xf1\xb3r\xc1\xa6\xa2F\xaec\xf7O\x93\x1e\x83e\xe1Z\x08\x9ch\xd6\x19\x00\x00\x00\x00\x00'
- GENESIS_TX_HASH_HEX = '3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a'
- GENESIS_TX_HASH = ';\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J'
- ADDRBYTE = '\x00'
- P2SHBYTE = '\x05'
- PRIVKEYBYTE = '\x80'
-else:
- BITCOIN_PORT = 18333
- BITCOIN_RPC_PORT = 18332
- ARMORY_RPC_PORT = 18225
- MAGIC_BYTES = '\x0b\x11\x09\x07'
- GENESIS_BLOCK_HASH_HEX = '43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000'
- GENESIS_BLOCK_HASH = 'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00'
- GENESIS_TX_HASH_HEX = '3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a'
- GENESIS_TX_HASH = ';\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J'
- ADDRBYTE = '\x6f'
- P2SHBYTE = '\xc4'
- PRIVKEYBYTE = '\xef'
-
-if not CLI_OPTIONS.satoshiPort == 'DEFAULT':
- try:
- BITCOIN_PORT = int(CLI_OPTIONS.satoshiPort)
- except:
- raise TypeError, 'Invalid port for Bitcoin-Qt, using ' + str(BITCOIN_PORT)
-
-
-if not CLI_OPTIONS.rpcport == 'DEFAULT':
- try:
- ARMORY_RPC_PORT = int(CLI_OPTIONS.rpcport)
- except:
- raise TypeError, 'Invalid RPC port for armoryd ' + str(ARMORY_RPC_PORT)
-
-
-BLOCKCHAINS = {}
-BLOCKCHAINS['\xf9\xbe\xb4\xd9'] = "Main Network"
-BLOCKCHAINS['\xfa\xbf\xb5\xda'] = "Old Test Network"
-BLOCKCHAINS['\x0b\x11\x09\x07'] = "Test Network (testnet3)"
-
-NETWORKS = {}
-NETWORKS['\x00'] = "Main Network"
-NETWORKS['\x6f'] = "Test Network"
-NETWORKS['\x34'] = "Namecoin Network"
-
-
-
-######### INITIALIZE LOGGING UTILITIES ##########
-#
-# Setup logging to write INFO+ to file, and WARNING+ to console
-# In debug mode, will write DEBUG+ to file and INFO+ to console
-#
-
-# Want to get the line in which an error was triggered, but by wrapping
-# the logger function (as I will below), the displayed "file:linenum"
-# references the logger function, not the function that called it.
-# So I use traceback to find the file and line number two up in the
-# stack trace, and return that to be displayed instead of default
-# [Is this a hack? Yes and no. I see no other way to do this]
-def getCallerLine():
- stkTwoUp = traceback.extract_stack()[-3]
- filename,method = stkTwoUp[0], stkTwoUp[1]
- return '%s:%d' % (os.path.basename(filename),method)
-
-# When there's an error in the logging function, it's impossible to find!
-# These wrappers will print the full stack so that it's possible to find
-# which line triggered the error
-def LOGDEBUG(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.debug(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-
-def LOGINFO(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.info(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-def LOGWARN(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.warn(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-def LOGERROR(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.error(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-def LOGCRIT(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.critical(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-def LOGEXCEPT(msg, *a):
- try:
- logstr = msg if len(a)==0 else (msg%a)
- callerStr = getCallerLine() + ' - '
- logging.exception(callerStr + logstr)
- except TypeError:
- traceback.print_stack()
- raise
-
-
-
-DEFAULT_CONSOLE_LOGTHRESH = logging.WARNING
-DEFAULT_FILE_LOGTHRESH = logging.INFO
-
-DEFAULT_PPRINT_LOGLEVEL = logging.DEBUG
-DEFAULT_RAWDATA_LOGLEVEL = logging.DEBUG
-
-rootLogger = logging.getLogger('')
-if CLI_OPTIONS.doDebug or CLI_OPTIONS.netlog or CLI_OPTIONS.mtdebug:
- # Drop it all one level: console will see INFO, file will see DEBUG
- DEFAULT_CONSOLE_LOGTHRESH -= 10
- DEFAULT_FILE_LOGTHRESH -= 10
-
-
-def chopLogFile(filename, size):
- if not os.path.exists(filename):
- print 'Log file doesn\'t exist [yet]'
- return
-
- logfile = open(filename, 'r')
- allLines = logfile.readlines()
- logfile.close()
-
- nBytes,nLines = 0,0;
- for line in allLines[::-1]:
- nBytes += len(line)
- nLines += 1
- if nBytes>size:
- break
-
- logfile = open(filename, 'w')
- for line in allLines[-nLines:]:
- logfile.write(line)
- logfile.close()
-
-
-
-# Cut down the log file to just the most recent 1 MB
-chopLogFile(ARMORY_LOG_FILE, 1024*1024)
-
-
-# Now set loglevels
-DateFormat = '%Y-%m-%d %H:%M'
-logging.getLogger('').setLevel(logging.DEBUG)
-fileFormatter = logging.Formatter('%(asctime)s (%(levelname)s) -- %(message)s', \
- datefmt=DateFormat)
-fileHandler = logging.FileHandler(ARMORY_LOG_FILE)
-fileHandler.setLevel(DEFAULT_FILE_LOGTHRESH)
-fileHandler.setFormatter(fileFormatter)
-logging.getLogger('').addHandler(fileHandler)
-
-consoleFormatter = logging.Formatter('(%(levelname)s) %(message)s')
-consoleHandler = logging.StreamHandler()
-consoleHandler.setLevel(DEFAULT_CONSOLE_LOGTHRESH)
-consoleHandler.setFormatter( consoleFormatter )
-logging.getLogger('').addHandler(consoleHandler)
-
-
-
-class stringAggregator(object):
- def __init__(self):
- self.theStr = ''
- def getStr(self):
- return self.theStr
- def write(self, theStr):
- self.theStr += theStr
-
-
-# A method to redirect pprint() calls to the log file
-# Need a way to take a pprint-able object, and redirect its output to file
-# Do this by swapping out sys.stdout temporarily, execute theObj.pprint()
-# then set sys.stdout back to the original.
-def LOGPPRINT(theObj, loglevel=DEFAULT_PPRINT_LOGLEVEL):
- sys.stdout = stringAggregator()
- theObj.pprint()
- printedStr = sys.stdout.getStr()
- sys.stdout = sys.__stdout__
- stkOneUp = traceback.extract_stack()[-2]
- filename,method = stkOneUp[0], stkOneUp[1]
- methodStr = '(PPRINT from %s:%d)\n' % (filename,method)
- logging.log(loglevel, methodStr + printedStr)
-
-# For super-debug mode, we'll write out raw data
-def LOGRAWDATA(rawStr, loglevel=DEFAULT_RAWDATA_LOGLEVEL):
- dtype = isLikelyDataType(rawStr)
- stkOneUp = traceback.extract_stack()[-2]
- filename,method = stkOneUp[0], stkOneUp[1]
- methodStr = '(PPRINT from %s:%d)\n' % (filename,method)
- pstr = rawStr[:]
- if dtype==DATATYPE.Binary:
- pstr = binary_to_hex(rawStr)
- pstr = prettyHex(pstr, indent=' ', withAddr=False)
- elif dtype==DATATYPE.Hex:
- pstr = prettyHex(pstr, indent=' ', withAddr=False)
- else:
- pstr = ' ' + '\n '.join(pstr.split('\n'))
-
- logging.log(loglevel, methodStr + pstr)
-
-
-cpplogfile = None
-if CLI_OPTIONS.logDisable:
- print 'Logging is disabled'
- rootLogger.disabled = True
-
-# For now, ditch the C++-console-catching. Logging python is enough
-# My attempt at C++ logging too was becoming a hardcore hack...
-"""
-elif CLI_OPTIONS.logcpp:
- # In order to catch C++ output, we have to redirect ALL stdout
- # (which means that console writes by python, too)
- cpplogfile = open(ARMORY_LOG_FILE_CPP, 'r')
- allLines = cpplogfile.readlines()
- cpplogfile.close()
- # Chop off the beginning of the file
- nBytes,nLines = 0,0;
- for line in allLines[::-1]:
- nBytes += len(line)
- nLines += 1
- if nBytes>100*1024:
- break
- cpplogfile = open(ARMORY_LOG_FILE_CPP, 'w')
- print 'nlines:', nLines
- for line in allLines[-nLines:]:
- print line,
- cpplogfile.write(line)
- cpplogfile.close()
- cpplogfile = open(ARMORY_LOG_FILE_CPP, 'a')
- raw_input()
- os.dup2(cpplogfile.fileno(), sys.stdout.fileno())
- raw_input()
- os.dup2(cpplogfile.fileno(), sys.stderr.fileno())
-"""
-
-
-fileRebuild = os.path.join(ARMORY_HOME_DIR, 'rebuild.txt')
-fileRescan = os.path.join(ARMORY_HOME_DIR, 'rescan.txt')
-if os.path.exists(fileRebuild):
- LOGINFO('Found %s, will destroy and rebuild databases' % fileRebuild)
- os.remove(fileRebuild)
- if os.path.exists(fileRescan):
- os.remove(fileRescan)
-
- CLI_OPTIONS.rebuild = True
-elif os.path.exists(fileRescan):
- LOGINFO('Found %s, will throw out saved history, rescan' % fileRescan)
- os.remove(fileRescan)
- if os.path.exists(fileRebuild):
- os.remove(fileRebuild)
- CLI_OPTIONS.rescan = True
-
-
-def logexcept_override(type, value, tback):
- import traceback
- import logging
- strList = traceback.format_exception(type,value,tback)
- logging.error(''.join([s for s in strList]))
- # then call the default handler
- sys.__excepthook__(type, value, tback)
-
-sys.excepthook = logexcept_override
-
-
-################################################################################
-def launchProcess(cmd, useStartInfo=True, *args, **kwargs):
- LOGINFO('Executing popen: %s', str(cmd))
- if not OS_WINDOWS:
- from subprocess import Popen, PIPE
- return Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, *args, **kwargs)
- else:
- from subprocess import Popen, PIPE, STARTUPINFO, STARTF_USESHOWWINDOW
- # Need lots of complicated stuff to accommodate quirks with Windows
- if isinstance(cmd, basestring):
- cmd2 = toPreferred(cmd)
- else:
- cmd2 = [toPreferred(c) for c in cmd]
-
- if useStartInfo:
- startinfo = STARTUPINFO()
- startinfo.dwFlags |= STARTF_USESHOWWINDOW
- return Popen(cmd2, \
- *args, \
- stdin=PIPE, \
- stdout=PIPE, \
- stderr=PIPE, \
- startupinfo=startinfo, \
- **kwargs)
- else:
- return Popen(cmd2, \
- *args, \
- stdin=PIPE, \
- stdout=PIPE, \
- stderr=PIPE, \
- **kwargs)
-
-
-################################################################################
-def killProcess(pid, sig='default'):
- # I had to do this, because killing a process in Windows has issues
- # when using py2exe (yes, os.kill does not work, for the same reason
- # I had to pass stdin/stdout/stderr everywhere...
- LOGWARN('Killing process pid=%d', pid)
- if not OS_WINDOWS:
- import os
- sig = signal.SIGKILL if sig=='default' else sig
- os.kill(pid, sig)
- else:
- import sys, os.path, ctypes, ctypes.wintypes
- k32 = ctypes.WinDLL('kernel32.dll')
- k32.OpenProcess.restype = ctypes.wintypes.HANDLE
- k32.TerminateProcess.restype = ctypes.wintypes.BOOL
- hProcess = k32.OpenProcess(1, False, pid)
- k32.TerminateProcess(hProcess, 1)
- k32.CloseHandle(hProcess)
-
-
-
-################################################################################
-def subprocess_check_output(*popenargs, **kwargs):
- """
- Run command with arguments and return its output as a byte string.
- Backported from Python 2.7, because it's stupid useful, short, and
- won't exist on systems using Python 2.6 or earlier
- """
- from subprocess import Popen, PIPE, CalledProcessError
- process = launchProcess(*popenargs, **kwargs)
- output, unused_err = process.communicate()
- retcode = process.poll()
- if retcode:
- cmd = kwargs.get("args")
- if cmd is None:
- cmd = popenargs[0]
- error = CalledProcessError(retcode, cmd)
- error.output = output
- raise error
- return output
-
-
-################################################################################
-def killProcessTree(pid):
- # In this case, Windows is easier because we know it has the get_children
- # call, because have bundled a recent version of psutil. Linux, however,
- # does not have that function call in earlier versions.
- if not OS_LINUX:
- for child in psutil.Process(pid).get_children():
- killProcess(child.pid)
- else:
- proc = Popen("ps -o pid --ppid %d --noheaders" % pid, shell=True, stdout=PIPE)
- out,err = proc.communicate()
- for pid_str in out.split("\n")[:-1]:
- killProcess(int(pid_str))
-
-
-################################################################################
-# Similar to subprocess_check_output, but used for long-running commands
-def execAndWait(cli_str, timeout=0, useStartInfo=True):
- """
- There may actually still be references to this function where check_output
- would've been more appropriate. But I didn't know about check_output at
- the time...
- """
-
- process = launchProcess(cli_str, shell=True, useStartInfo=useStartInfo)
- pid = process.pid
- start = RightNow()
- while process.poll() == None:
- time.sleep(0.1)
- if timeout>0 and (RightNow() - start)>timeout:
- print 'Process exceeded timeout, killing it'
- killProcess(pid)
- out,err = process.communicate()
- return [out,err]
-
-
-
-################################################################################
-# Get system details for logging purposes
-class DumbStruct(object): pass
-def GetSystemDetails():
- """Checks memory of a given system"""
-
- out = DumbStruct()
-
- CPU,COR,X64,MEM = range(4)
- sysParam = [None,None,None,None]
- out.CpuStr = 'UNKNOWN'
- if OS_LINUX:
- # Get total RAM
- freeStr = subprocess_check_output('free -m', shell=True)
- totalMemory = freeStr.split('\n')[1].split()[1]
- out.Memory = int(totalMemory) * 1024
-
- # Get CPU name
- out.CpuStr = 'Unknown'
- cpuinfo = subprocess_check_output(['cat','/proc/cpuinfo'])
- for line in cpuinfo.split('\n'):
- if line.strip().lower().startswith('model name'):
- out.CpuStr = line.split(':')[1].strip()
- break
-
-
- elif OS_WINDOWS:
- import ctypes
- class MEMORYSTATUSEX(ctypes.Structure):
- _fields_ = [
- ("dwLength", ctypes.c_ulong),
- ("dwMemoryLoad", ctypes.c_ulong),
- ("ullTotalPhys", ctypes.c_ulonglong),
- ("ullAvailPhys", ctypes.c_ulonglong),
- ("ullTotalPageFile", ctypes.c_ulonglong),
- ("ullAvailPageFile", ctypes.c_ulonglong),
- ("ullTotalVirtual", ctypes.c_ulonglong),
- ("ullAvailVirtual", ctypes.c_ulonglong),
- ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
- ]
- def __init__(self):
- # have to initialize this to the size of MEMORYSTATUSEX
- self.dwLength = ctypes.sizeof(self)
- super(MEMORYSTATUSEX, self).__init__()
-
- stat = MEMORYSTATUSEX()
- ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
- out.Memory = stat.ullTotalPhys/1024.
- out.CpuStr = platform.processor()
- elif OS_MACOSX:
- memsizeStr = subprocess_check_output('sysctl hw.memsize', shell=True)
- out.Memory = int(memsizeStr.split(": ")[1]) / 1024
- out.CpuStr = subprocess_check_output('sysctl -n machdep.cpu.brand_string', shell=True)
-
- out.NumCores = multiprocessing.cpu_count()
- out.IsX64 = platform.architecture()[0].startswith('64')
- out.Memory = out.Memory / (1024*1024.)
- return out
-
-try:
- SystemSpecs = GetSystemDetails()
-except:
- LOGEXCEPT('Error getting system details:')
- LOGERROR('Skipping.')
- SystemSpecs = DumbStruct()
- SystemSpecs.Memory = -1
- SystemSpecs.CpuStr = 'Unknown'
- SystemSpecs.NumCores = -1
- SystemSpecs.IsX64 = 'Unknown'
-
-
-LOGINFO('')
-LOGINFO('')
-LOGINFO('')
-LOGINFO('************************************************************')
-LOGINFO('Invoked: ' + ' '.join(argv))
-LOGINFO('************************************************************')
-LOGINFO('Loading Armory Engine:')
-LOGINFO(' Armory Version : ' + getVersionString(BTCARMORY_VERSION))
-LOGINFO(' PyBtcWallet Version : ' + getVersionString(PYBTCWALLET_VERSION))
-LOGINFO('Detected Operating system: ' + OS_NAME)
-LOGINFO(' OS Variant : ' + (str(OS_VARIANT) if OS_MACOSX else '-'.join(OS_VARIANT)))
-LOGINFO(' User home-directory : ' + USER_HOME_DIR)
-LOGINFO(' Satoshi BTC directory : ' + BTC_HOME_DIR)
-LOGINFO(' Armory home dir : ' + ARMORY_HOME_DIR)
-LOGINFO('Detected System Specs : ')
-LOGINFO(' Total Available RAM : %0.2f GB', SystemSpecs.Memory)
-LOGINFO(' CPU ID string : ' + SystemSpecs.CpuStr)
-LOGINFO(' Number of CPU cores : %d cores', SystemSpecs.NumCores)
-LOGINFO(' System is 64-bit : ' + str(SystemSpecs.IsX64))
-LOGINFO(' Preferred Encoding : ' + locale.getpreferredencoding())
-LOGINFO('')
-LOGINFO('Network Name: ' + NETWORKS[ADDRBYTE])
-LOGINFO('Satoshi Port: %d', BITCOIN_PORT)
-LOGINFO('Named options/arguments to armoryengine.py:')
-for key,val in ast.literal_eval(str(CLI_OPTIONS)).iteritems():
- LOGINFO(' %-16s: %s', key,val)
-LOGINFO('Other arguments:')
-for val in CLI_ARGS:
- LOGINFO(' %s', val)
-LOGINFO('************************************************************')
-
-
-def GetExecDir():
- """
- Return the path from where armoryengine was imported. Inspect method
- expects a function or module name, it can actually inspect its own
- name...
- """
- srcfile = inspect.getsourcefile(GetExecDir)
- srcpath = os.path.dirname(srcfile)
- srcpath = os.path.abspath(srcpath)
- return srcpath
-
-
-
-def coin2str(nSatoshi, ndec=8, rJust=True, maxZeros=8):
- """
- Converts a raw value (1e-8 BTC) into a formatted string for display
-
- ndec, guarantees that we get get a least N decimal places in our result
-
- maxZeros means we will replace zeros with spaces up to M decimal places
- in order to declutter the amount field
-
- """
-
- nBtc = float(nSatoshi) / float(ONE_BTC)
- s = ('%%0.%df' % ndec) % nBtc
- s = s.rjust(18, ' ')
-
- if maxZeros < ndec:
- maxChop = ndec - maxZeros
- nChop = min(len(s) - len(str(s.strip('0'))), maxChop)
- if nChop>0:
- s = s[:-nChop] + nChop*' '
-
- if nSatoshi < 10000*ONE_BTC:
- s.lstrip()
-
- if not rJust:
- s = s.strip(' ')
-
- s = s.replace('. ', '')
-
- return s
-
-
-def coin2strNZ(nSatoshi):
- """ Right-justified, minimum zeros, but with padding for alignment"""
- return coin2str(nSatoshi, 8, True, 0)
-
-def coin2strNZS(nSatoshi):
- """ Right-justified, minimum zeros, stripped """
- return coin2str(nSatoshi, 8, True, 0).strip()
-
-def coin2str_approx(nSatoshi, sigfig=3):
- posVal = nSatoshi
- isNeg = False
- if nSatoshi<0:
- isNeg = True
- posVal *= -1
-
- nDig = max(round(math.log(posVal+1, 10)-0.5), 0)
- nChop = max(nDig-2, 0 )
- approxVal = round((10**nChop) * round(posVal / (10**nChop)))
- return coin2str( (-1 if isNeg else 1)*approxVal, maxZeros=0)
-
-
-def str2coin(theStr, negAllowed=True, maxDec=8, roundHighPrec=True):
- coinStr = str(theStr)
- if len(coinStr.strip())==0:
- raise ValueError
-
- isNeg = ('-' in coinStr)
- coinStrPos = coinStr.replace('-','')
- if not '.' in coinStrPos:
- if not negAllowed and isNeg:
- raise NegativeValueError
- return (int(coinStrPos)*ONE_BTC)*(-1 if isNeg else 1)
- else:
- lhs,rhs = coinStrPos.strip().split('.')
- if len(lhs.strip('-'))==0:
- lhs='0'
- if len(rhs)>maxDec and not roundHighPrec:
- raise TooMuchPrecisionError
- if not negAllowed and isNeg:
- raise NegativeValueError
- fullInt = (int(lhs + rhs[:9].ljust(9,'0')) + 5) / 10
- return fullInt*(-1 if isNeg else 1)
-
-
-# This is a sweet trick for create enum-like dictionaries.
-# Either automatically numbers (*args), or name-val pairs (**kwargs)
-#http://stackoverflow.com/questions/36932/whats-the-best-way-to-implement-an-enum-in-python
-def enum(*sequential, **named):
- enums = dict(zip(sequential, range(len(sequential))), **named)
- return type('Enum', (), enums)
-
-
-# Some useful constants to be used throughout everything
-BASE58CHARS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
-BASE16CHARS = '0123 4567 89ab cdef'.replace(' ','')
-LITTLEENDIAN = '<';
-BIGENDIAN = '>';
-NETWORKENDIAN = '!';
-ONE_BTC = long(100000000)
-CENT = long(1000000)
-UNINITIALIZED = None
-UNKNOWN = -2
-MIN_TX_FEE = 10000
-MIN_RELAY_TX_FEE = 10000
-MT_WAIT_TIMEOUT_SEC = 20;
-
-UINT8_MAX = 2**8-1
-UINT16_MAX = 2**16-1
-UINT32_MAX = 2**32-1
-UINT64_MAX = 2**64-1
-
-RightNow = time.time
-SECOND = 1
-MINUTE = 60
-HOUR = 3600
-DAY = 24*HOUR
-WEEK = 7*DAY
-MONTH = 30*DAY
-YEAR = 365*DAY
-
-KILOBYTE = 1024.0
-MEGABYTE = 1024*KILOBYTE
-GIGABYTE = 1024*MEGABYTE
-TERABYTE = 1024*GIGABYTE
-PETABYTE = 1024*TERABYTE
-
-# Set the default-default
-DEFAULT_DATE_FORMAT = '%Y-%b-%d %I:%M%p'
-FORMAT_SYMBOLS = [ \
- ['%y', 'year, two digit (00-99)'], \
- ['%Y', 'year, four digit'], \
- ['%b', 'month name (abbrev)'], \
- ['%B', 'month name (full)'], \
- ['%m', 'month number (01-12)'], \
- ['%d', 'day of month (01-31)'], \
- ['%H', 'hour 24h (00-23)'], \
- ['%I', 'hour 12h (01-12)'], \
- ['%M', 'minute (00-59)'], \
- ['%p', 'morning/night (am,pm)'], \
- ['%a', 'day of week (abbrev)'], \
- ['%A', 'day of week (full)'], \
- ['%%', 'percent symbol'] ]
-
-
-# The database uses prefixes to identify type of address. Until the new
-# wallet format is created that supports more than just hash160 addresses
-# we have to explicitly add the prefix to any hash160 values that are being
-# sent to any of the C++ utilities. For instance, the BlockDataManager (BDM)
-# (C++ stuff) tracks regular hash160 addresses, P2SH, multisig, and all
-# non-standard scripts. Any such "scrAddrs" (script-addresses) will eventually
-# be valid entities for tracking in a wallet. Until then, all of our python
-# utilities all use just hash160 values, and we manually add the prefix
-# before talking to the BDM.
-HASH160PREFIX = '\x00'
-P2SHPREFIX = '\x05'
-MSIGPREFIX = '\xfe'
-NONSTDPREFIX = '\xff'
-def CheckHash160(scrAddr):
- if not len(scrAddr)==21:
- raise BadAddressError, "Supplied scrAddr is not a Hash160 value!"
- if not scrAddr[0] == HASH160PREFIX:
- raise BadAddressError, "Supplied scrAddr is not a Hash160 value!"
- return scrAddr[1:]
-
-def Hash160ToScrAddr(a160):
- if not len(a160)==20:
- LOGERROR('Invalid hash160 value!')
- return HASH160PREFIX + a160
-
-def HexHash160ToScrAddr(a160):
- if not len(a160)==40:
- LOGERROR('Invalid hash160 value!')
- return HASH160PREFIX + hex_to_binary(a160)
-
-
-# Some more constants that are needed to play nice with the C++ utilities
-ARMORY_DB_BARE, ARMORY_DB_LITE, ARMORY_DB_PARTIAL, ARMORY_DB_FULL, ARMORY_DB_SUPER = range(5)
-DB_PRUNE_ALL, DB_PRUNE_NONE = range(2)
-
-
-
-
-# Some time methods (RightNow() return local unix timestamp)
-RightNow = time.time
-def RightNowUTC():
- return time.mktime(time.gmtime(RightNow()))
-
-
-
-################################################################################
-# Load the C++ utilites here
-#
-# The SWIG/C++ block utilities give us access to the blockchain, fast ECDSA
-# operations, and general encryption/secure-binary containers
-################################################################################
-try:
- import CppBlockUtils as Cpp
- from CppBlockUtils import KdfRomix, CryptoECDSA, CryptoAES, SecureBinaryData
- LOGINFO('C++ block utilities loaded successfully')
-except:
- LOGCRIT('C++ block utilities not available.')
- LOGCRIT(' Make sure that you have the SWIG-compiled modules')
- LOGCRIT(' in the current directory (or added to the PATH)')
- LOGCRIT(' Specifically, you need:')
- LOGCRIT(' CppBlockUtils.py and')
- if OS_LINUX or OS_MACOSX:
- LOGCRIT(' _CppBlockUtils.so')
- elif OS_WINDOWS:
- LOGCRIT(' _CppBlockUtils.pyd')
- else:
- LOGCRIT('\n\n... UNKNOWN operating system')
- raise
-
-
-
-
-
-DATATYPE = enum("Binary", 'Base58', 'Hex')
-def isLikelyDataType(theStr, dtype=None):
- """
- This really shouldn't be used on short strings. Hence
- why it's called "likely" datatype...
- """
- ret = None
- hexCount = sum([1 if c in BASE16CHARS else 0 for c in theStr])
- b58Count = sum([1 if c in BASE58CHARS else 0 for c in theStr])
- canBeHex = hexCount==len(theStr)
- canBeB58 = b58Count==len(theStr)
- if canBeHex:
- ret = DATATYPE.Hex
- elif canBeB58 and not canBeHex:
- ret = DATATYPE.Base58
- else:
- ret = DATATYPE.Binary
-
- if dtype==None:
- return ret
- else:
- return dtype==ret
-
-
-def getCurrTimeAndBlock():
- time0 = long(RightNowUTC())
- if TheBDM.getBDMState()=='BlockchainReady':
- return (time0, TheBDM.getTopBlockHeight())
- else:
- return (time0, UINT32_MAX)
-
-
-
-# Define all the hashing functions we're going to need. We don't actually
-# use any of the first three directly (sha1, sha256, ripemd160), we only
-# use hash256 and hash160 which use the first three to create the ONLY hash
-# operations we ever do in the bitcoin network
-# UPDATE: mini-private-key format requires vanilla sha256...
-def sha1(bits):
- return hashlib.new('sha1', bits).digest()
-def sha256(bits):
- return hashlib.new('sha256', bits).digest()
-def sha512(bits):
- return hashlib.new('sha512', bits).digest()
-def ripemd160(bits):
- # It turns out that not all python has ripemd160...?
- #return hashlib.new('ripemd160', bits).digest()
- return Cpp.BtcUtils().ripemd160_SWIG(bits)
-def hash256(s):
- """ Double-SHA256 """
- return sha256(sha256(s))
-def hash160(s):
- """ RIPEMD160( SHA256( binaryStr ) ) """
- return Cpp.BtcUtils().getHash160_SWIG(s)
-
-
-def HMAC(key, msg, hashfunc=sha512, hashsz=None):
- """ This is intended to be simple, not fast. For speed, use HDWalletCrypto() """
- hashsz = len(hashfunc('')) if hashsz==None else hashsz
- key = (hashfunc(key) if len(key)>hashsz else key)
- key = key.ljust(hashsz, '\x00')
- okey = ''.join([chr(ord('\x5c')^ord(c)) for c in key])
- ikey = ''.join([chr(ord('\x36')^ord(c)) for c in key])
- return hashfunc( okey + hashfunc(ikey + msg) )
-
-HMAC256 = lambda key,msg: HMAC(key,msg,sha256, 32)
-HMAC512 = lambda key,msg: HMAC(key,msg,sha512, 64)
-
-################################################################################
-def prettyHex(theStr, indent='', withAddr=True, major=8, minor=8):
- """
- This is the same as pprintHex(), but returns the string instead of
- printing it to console. This is useful for redirecting output to
- files, or doing further modifications to the data before display
- """
- outStr = ''
- sz = len(theStr)
- nchunk = int((sz-1)/minor) + 1;
- for i in range(nchunk):
- if i%major==0:
- outStr += '\n' + indent
- if withAddr:
- locStr = int_to_hex(i*minor/2, widthBytes=2, endOut=BIGENDIAN)
- outStr += '0x' + locStr + ': '
- outStr += theStr[i*minor:(i+1)*minor] + ' '
- return outStr
-
-
-
-
-
-################################################################################
-def pprintHex(theStr, indent='', withAddr=True, major=8, minor=8):
- """
- This method takes in a long hex string and prints it out into rows
- of 64 hex chars, in chunks of 8 hex characters, and with address
- markings on each row. This means that each row displays 32 bytes,
- which is usually pleasant.
-
- The format is customizable: you can adjust the indenting of the
- entire block, remove address markings, or change the major/minor
- grouping size (major * minor = hexCharsPerRow)
- """
- print prettyHex(theStr, indent, withAddr, major, minor)
-
-
-
-def pprintDiff(str1, str2, indent=''):
- if not len(str1)==len(str2):
- print 'pprintDiff: Strings are different length!'
- return
-
- byteDiff = []
- for i in range(len(str1)):
- if str1[i]==str2[i]:
- byteDiff.append('-')
- else:
- byteDiff.append('X')
-
- pprintHex(''.join(byteDiff), indent=indent)
-
-
-
-
-##### Switch endian-ness #####
-def hex_switchEndian(s):
- """ Switches the endianness of a hex string (in pairs of hex chars) """
- pairList = [s[i]+s[i+1] for i in xrange(0,len(s),2)]
- return ''.join(pairList[::-1])
-def binary_switchEndian(s):
- """ Switches the endianness of a binary string """
- return s[::-1]
-
-
-##### INT/HEXSTR #####
-def int_to_hex(i, widthBytes=0, endOut=LITTLEENDIAN):
- """
- Convert an integer (int() or long()) to hexadecimal. Default behavior is
- to use the smallest even number of hex characters necessary, and using
- little-endian. Use the widthBytes argument to add 0-padding where needed
- if you are expecting constant-length output.
- """
- h = hex(i)[2:]
- if isinstance(i,long):
- h = h[:-1]
- if len(h)%2 == 1:
- h = '0'+h
- if not widthBytes==0:
- nZero = 2*widthBytes - len(h)
- if nZero > 0:
- h = '0'*nZero + h
- if endOut==LITTLEENDIAN:
- h = hex_switchEndian(h)
- return h
-
-def hex_to_int(h, endIn=LITTLEENDIAN):
- """
- Convert hex-string to integer (or long). Default behavior is to interpret
- hex string as little-endian
- """
- hstr = h.replace(' ','') # copies data, no references
- if endIn==LITTLEENDIAN:
- hstr = hex_switchEndian(hstr)
- return( int(hstr, 16) )
-
-
-##### HEXSTR/BINARYSTR #####
-def hex_to_binary(h, endIn=LITTLEENDIAN, endOut=LITTLEENDIAN):
- """
- Converts hexadecimal to binary (in a python string). Endianness is
- only switched if (endIn != endOut)
- """
- bout = h.replace(' ','') # copies data, no references
- if not endIn==endOut:
- bout = hex_switchEndian(bout)
- return bout.decode('hex_codec')
-
-
-def binary_to_hex(b, endOut=LITTLEENDIAN, endIn=LITTLEENDIAN):
- """
- Converts binary to hexadecimal. Endianness is only switched
- if (endIn != endOut)
- """
- hout = b.encode('hex_codec')
- if not endOut==endIn:
- hout = hex_switchEndian(hout)
- return hout
-
-
-##### INT/BINARYSTR #####
-def int_to_binary(i, widthBytes=0, endOut=LITTLEENDIAN):
- """
- Convert integer to binary. Default behavior is use as few bytes
- as necessary, and to use little-endian. This can be changed with
- the two optional input arguemnts.
- """
- h = int_to_hex(i,widthBytes)
- return hex_to_binary(h, endOut=endOut)
-
-def binary_to_int(b, endIn=LITTLEENDIAN):
- """
- Converts binary to integer (or long). Interpret as LE by default
- """
- h = binary_to_hex(b, endIn, LITTLEENDIAN)
- return hex_to_int(h)
-
-##### INT/BITS #####
-
-def int_to_bitset(i, widthBytes=0):
- bitsOut = []
- while i>0:
- i,r = divmod(i,2)
- bitsOut.append(['0','1'][r])
- result = ''.join(bitsOut)
- if widthBytes != 0:
- result = result.ljust(widthBytes*8,'0')
- return result
-
-def bitset_to_int(bitset):
- n = 0
- for i,bit in enumerate(bitset):
- n += (0 if bit=='0' else 1) * 2**i
- return n
-
-
-
-EmptyHash = hex_to_binary('00'*32)
-
-
-################################################################################
-# BINARY/BASE58 CONVERSIONS
-def binary_to_base58(binstr):
- """
- This method applies the Bitcoin-specific conversion from binary to Base58
- which may includes some extra "zero" bytes, such as is the case with the
- main-network addresses.
-
- This method is labeled as outputting an "addrStr", but it's really this
- special kind of Base58 converter, which makes it usable for encoding other
- data, such as ECDSA keys or scripts.
- """
- padding = 0;
- for b in binstr:
- if b=='\x00':
- padding+=1
- else:
- break
-
- n = 0
- for ch in binstr:
- n *= 256
- n += ord(ch)
-
- b58 = ''
- while n > 0:
- n, r = divmod (n, 58)
- b58 = BASE58CHARS[r] + b58
- return '1'*padding + b58
-
-
-################################################################################
-def base58_to_binary(addr):
- """
- This method applies the Bitcoin-specific conversion from Base58 to binary
- which may includes some extra "zero" bytes, such as is the case with the
- main-network addresses.
-
- This method is labeled as inputting an "addrStr", but it's really this
- special kind of Base58 converter, which makes it usable for encoding other
- data, such as ECDSA keys or scripts.
- """
- # Count the zeros ('1' characters) at the beginning
- padding = 0;
- for c in addr:
- if c=='1':
- padding+=1
- else:
- break
-
- n = 0
- for ch in addr:
- n *= 58
- n += BASE58CHARS.index(ch)
-
- binOut = ''
- while n>0:
- d,m = divmod(n,256)
- binOut = chr(m) + binOut
- n = d
- return '\x00'*padding + binOut
-
-
-
-
-
-
-
-################################################################################
-def hash160_to_addrStr(binStr, isP2SH=False):
- """
- Converts the 20-byte pubKeyHash to 25-byte binary Bitcoin address
- which includes the network byte (prefix) and 4-byte checksum (suffix)
- """
- addr21 = (P2SHBYTE if isP2SH else ADDRBYTE) + binStr
- addr25 = addr21 + hash256(addr21)[:4]
- return binary_to_base58(addr25);
-
-################################################################################
-def addrStr_is_p2sh(b58Str):
- binStr = base58_to_binary(b58Str)
- if not len(binStr)==25:
- return False
- return (binStr[0] == P2SHBYTE)
-
-################################################################################
-def addrStr_to_hash160(b58Str):
- return base58_to_binary(b58Str)[1:-4]
-
-
-###### Typing-friendly Base16 #####
-# Implements "hexadecimal" encoding but using only easy-to-type
-# characters in the alphabet. Hex usually includes the digits 0-9
-# which can be slow to type, even for good typists. On the other
-# hand, by changing the alphabet to common, easily distinguishable,
-# lowercase characters, typing such strings will become dramatically
-# faster. Additionally, some default encodings of QRCodes do not
-# preserve the capitalization of the letters, meaning that Base58
-# is not a feasible options
-NORMALCHARS = '0123 4567 89ab cdef'.replace(' ','')
-EASY16CHARS = 'asdf ghjk wert uion'.replace(' ','')
-hex_to_base16_map = {}
-base16_to_hex_map = {}
-for n,b in zip(NORMALCHARS,EASY16CHARS):
- hex_to_base16_map[n] = b
- base16_to_hex_map[b] = n
-
-def binary_to_easyType16(binstr):
- return ''.join([hex_to_base16_map[c] for c in binary_to_hex(binstr)])
-
-# Treat unrecognized characters as 0, to facilitate possibly later recovery of
-# their correct values from the checksum.
-def easyType16_to_binary(b16str):
- return hex_to_binary(''.join([base16_to_hex_map.get(c, '0') for c in b16str]))
-
-
-def makeSixteenBytesEasy(b16):
- if not len(b16)==16:
- raise ValueError, 'Must supply 16-byte input'
- chk2 = computeChecksum(b16, nBytes=2)
- et18 = binary_to_easyType16(b16 + chk2)
- nineQuads = [et18[i*4:(i+1)*4] for i in range(9)]
- first4 = ' '.join(nineQuads[:4])
- second4 = ' '.join(nineQuads[4:8])
- last1 = nineQuads[8]
- return ' '.join([first4, second4, last1])
-
-def readSixteenEasyBytes(et18):
- b18 = easyType16_to_binary(et18.strip().replace(' ',''))
- b16 = b18[:16]
- chk = b18[ 16:]
- if chk=='':
- LOGWARN('Missing checksum when reading EasyType')
- return (b16, 'No_Checksum')
- b16new = verifyChecksum(b16, chk)
- if len(b16new)==0:
- return ('','Error_2+')
- elif not b16new==b16:
- return (b16new,'Fixed_1')
- else:
- return (b16new,None)
-
-##### FLOAT/BTC #####
-# https://en.bitcoin.it/wiki/Proper_Money_Handling_(JSON-RPC)
-def ubtc_to_floatStr(n):
- return '%d.%08d' % divmod (n, ONE_BTC)
-def floatStr_to_ubtc(s):
- return long(round(float(s) * ONE_BTC))
-def float_to_btc (f):
- return long (round(f * ONE_BTC))
-
-
-
-##### And a few useful utilities #####
-def unixTimeToFormatStr(unixTime, formatStr=DEFAULT_DATE_FORMAT):
- """
- Converts a unix time (like those found in block headers) to a
- pleasant, human-readable format
- """
- dtobj = datetime.fromtimestamp(unixTime)
- dtstr = u'' + dtobj.strftime(formatStr).decode('utf-8')
- return dtstr[:-2] + dtstr[-2:].lower()
-
def secondsToHumanTime(nSec):
   """Approximate a duration as '1 minute', '1.5 hours', '3 weeks', etc."""
   # (threshold, unit-name, divisor): pick the first unit where the duration
   # is under 0.9 of the next-larger unit, exactly as the old if-ladder did
   unitTable = [(MINUTE, 'second', 1),
                (HOUR,   'minute', MINUTE),
                (DAY,    'hour',   HOUR),
                (WEEK,   'day',    DAY),
                (MONTH,  'week',   WEEK)]

   floatSec = float(nSec)
   for cap, unit, div in unitTable:
      if floatSec < 0.9*cap:
         qty = floatSec / div
         break
   else:
      qty, unit = floatSec/MONTH, 'month'

   if qty < 1.25:
      return '1 ' + unit
   elif qty <= 1.75:
      return '1.5 ' + unit + 's'
   else:
      return '%d %ss' % (int(qty+0.5), unit)
-
-def bytesToHumanSize(nBytes):
- if nBytes0:
- if not beQuiet: LOGWARN('fixed!')
- return fixStr
- else:
- # ONE LAST CHECK SPECIFIC TO MY SERIALIZATION SCHEME:
- # If the string was originally all zeros, chksum is hash256('')
- # ...which is a known value, and frequently used in my files
- if chksum==hex_to_binary('5df6e0e2'):
- if not beQuiet: LOGWARN('fixed!')
- return ''
-
-
- # ID a checksum byte error...
- origHash = hashFunc(bin1)
- for i in range(len(chksum)):
- chkArray = [chksum[j] for j in range(len(chksum))]
- for ch in range(256):
- chkArray[i] = chr(ch)
- if origHash.startswith(''.join(chkArray)):
- LOGWARN('***Checksum error! Incorrect byte in checksum!')
- return bin1
-
- LOGWARN('Checksum fix failed')
- return ''
-
-
# Taken directly from rpc.cpp in reference bitcoin client, 0.3.24
def binaryBits_to_difficulty(b):
   """ Converts the 4-byte binary difficulty string to a float """
   bits = binary_to_int(b)
   nShift = (bits >> 24) & 0xff
   dDiff = float(0x0000ffff) / float(bits & 0x00ffffff)
   # Normalize the exponent toward 29, scaling by 256 per step
   while nShift != 29:
      if nShift < 29:
         dDiff *= 256.0
         nShift += 1
      else:
         dDiff /= 256.0
         nShift -= 1
   return dDiff
-
# TODO: I don't actually know how to do this, yet...
def difficulty_to_binaryBits(i):
   # Intended inverse of binaryBits_to_difficulty (compact "nBits" encoding).
   # Unimplemented -- callers currently receive None.
   pass
-
-
################################################################################
from qrcodenative import QRCode, QRErrorCorrectLevel
def CreateQRMatrix(dataToEncode, errLevel='L'):
   """
   Build a QR-code bit matrix for the given data.  Returns [matrix, size]
   on success, or ([[0]], 1) if the data could not fit any version up to 19.
   """
   errCorrectEnum = getattr(QRErrorCorrectLevel, errLevel.upper())
   qr = None
   # Try successively larger QR versions until one can hold the data
   for sz in range(3, 20):
      try:
         candidate = QRCode(sz, errCorrectEnum)
         candidate.addData(dataToEncode)
         candidate.make()
         qr = candidate
         break
      except TypeError:
         continue

   if qr is None:
      LOGERROR('Unsuccessful attempt to create QR code')
      LOGERROR('Data to encode: (Length: %s, isAscii: %s)', \
               len(dataToEncode), isASCII(dataToEncode))
      return [[0]], 1

   modCt = qr.getModuleCount()
   # The underlying module matrix is transposed relative to what we expect,
   # so index it as (column, row) while building row-major output
   qrmtrx = [[1 if qr.isDark(c, r) else 0 for c in range(modCt)]
             for r in range(modCt)]
   return [qrmtrx, modCt]
-
-
-
################################################################################
################################################################################
# Classes for reading and writing large binary objects
################################################################################
################################################################################
# Type codes passed to BinaryPacker.put() / BinaryUnpacker.get()
UINT8, UINT16, UINT32, UINT64, INT8, INT16, INT32, INT64, VAR_INT, VAR_STR, FLOAT, BINARY_CHUNK = range(12)
-
-# Seed this object with binary data, then read in its pieces sequentially
-class BinaryUnpacker(object):
- """
- Class for helping unpack binary streams of data. Typical usage is
- >> bup = BinaryUnpacker(myBinaryData)
- >> int32 = bup.get(UINT32)
- >> int64 = bup.get(VAR_INT)
- >> bytes10 = bup.get(BINARY_CHUNK, 10)
- >> ...etc...
- """
   def __init__(self, binaryStr):
      # Full buffer plus a cursor ('pos') advanced by each get() call
      self.binaryStr = binaryStr
      self.pos = 0

   # Simple buffer/cursor accessors and navigation helpers (no bounds checks)
   def getSize(self): return len(self.binaryStr)
   def getRemainingSize(self): return len(self.binaryStr) - self.pos
   def getBinaryString(self): return self.binaryStr
   def getRemainingString(self): return self.binaryStr[self.pos:]
   def append(self, binaryStr): self.binaryStr += binaryStr
   def advance(self, bytesToAdvance): self.pos += bytesToAdvance
   def rewind(self, bytesToRewind): self.pos -= bytesToRewind
   def resetPosition(self, toPos=0): self.pos = toPos
   def getPosition(self): return self.pos
-
- def get(self, varType, sz=0, endianness=LITTLEENDIAN):
- """
- First argument is the data-type: UINT32, VAR_INT, etc.
- If BINARY_CHUNK, need to supply a number of bytes to read, as well
- """
- def sizeCheck(sz):
- if self.getRemainingSize()> binpack = BinaryPacker()
- >> bup.put(UINT32, 12)
- >> bup.put(VAR_INT, 78)
- >> bup.put(BINARY_CHUNK, '\x9f'*10)
- >> ...etc...
- >> result = bup.getBinaryString()
- """
   def __init__(self):
      # Accumulate serialized pieces in a list; joined by getBinaryString()
      self.binaryConcat = []

   def getSize(self):
      # Total byte length of everything put() so far
      return sum([len(a) for a in self.binaryConcat])

   def getBinaryString(self):
      # Concatenate all accumulated pieces into one string
      return ''.join(self.binaryConcat)

   def __str__(self):
      return self.getBinaryString()
-
-
- def put(self, varType, theData, width=None, endianness=LITTLEENDIAN):
- """
- Need to supply the argument type you are put'ing into the stream.
- Values of BINARY_CHUNK will automatically detect the size as necessary
-
- Use width=X to include padding of BINARY_CHUNKs w/ 0x00 bytes
- """
- E = endianness
- if varType == UINT8:
- self.binaryConcat += int_to_binary(theData, 1, endianness)
- elif varType == UINT16:
- self.binaryConcat += int_to_binary(theData, 2, endianness)
- elif varType == UINT32:
- self.binaryConcat += int_to_binary(theData, 4, endianness)
- elif varType == UINT64:
- self.binaryConcat += int_to_binary(theData, 8, endianness)
- elif varType == INT8:
- self.binaryConcat += pack(E+'b', theData)
- elif varType == INT16:
- self.binaryConcat += pack(E+'h', theData)
- elif varType == INT32:
- self.binaryConcat += pack(E+'i', theData)
- elif varType == INT64:
- self.binaryConcat += pack(E+'q', theData)
- elif varType == VAR_INT:
- self.binaryConcat += packVarInt(theData)[0]
- elif varType == VAR_STR:
- self.binaryConcat += packVarInt(len(theData))[0]
- self.binaryConcat += theData
- elif varType == FLOAT:
- self.binaryConcat += pack(E+'f', theData)
- elif varType == BINARY_CHUNK:
- if width==None:
- self.binaryConcat += theData
- else:
- if len(theData)>width:
- raise PackerError, 'Too much data to fit into fixed width field'
- self.binaryConcat += theData.ljust(width, '\x00')
- else:
- raise PackerError, "Var type not recognized! VarType="+str(varType)
-
-################################################################################
-
# The following params are for the Bitcoin elliptic curves (secp256k1)
# Curve equation: y^2 = x^3 + A*x + B over GF(SECP256K1_MOD); the point
# (GX, GY) generates a subgroup of order SECP256K1_ORDER
SECP256K1_MOD = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2FL
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141L
SECP256K1_B = 0x0000000000000000000000000000000000000000000000000000000000000007L
SECP256K1_A = 0x0000000000000000000000000000000000000000000000000000000000000000L
SECP256K1_GX = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798L
SECP256K1_GY = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8L
-
-
-
-
-
-################################################################################
-################################################################################
-# START FINITE FIELD OPERATIONS
-
-
class FiniteField(object):
   """
   Create a simple, prime-order FiniteField. Because this is used only
   to encode data of fixed width, I enforce prime-order by hardcoding
   primes, and you just pick the data width (in bytes). If your desired
   data width is not here, simply find a prime number very close to 2^N,
   and add it to the PRIMES map below.

   This will be used for Shamir's Secret Sharing scheme. Encode your
   data as the coeffient of finite-field polynomial, and store points
   on that polynomial. The order of the polynomial determines how
   many points are needed to recover the original secret.
   """

   # bytes: primeclosetomaxval
   PRIMES = { 1: 2**8-5, # mainly for testing
              2: 2**16-39,
              4: 2**32-5,
              8: 2**64-59,
             16: 2**128-797,
             20: 2**160-543,
             24: 2**192-333,
             32: 2**256-357,
             48: 2**384-317,
             64: 2**512-569,
             96: 2**768-825,
            128: 2**1024-105,
            192: 2**1536-3453,
            256: 2**2048-1157 }

   def __init__(self, nbytes):
      """Select the hardcoded prime just below 2**(8*nbytes); raise if none."""
      # FIX: dict.has_key() is deprecated (removed in Py3); use 'in'
      if nbytes not in self.PRIMES:
         LOGERROR('No primes available for size=%d bytes', nbytes)
         self.prime = None
         raise FiniteFieldError
      self.prime = self.PRIMES[nbytes]


   def add(self,a,b):
      return (a+b) % self.prime

   def subtract(self,a,b):
      return (a-b) % self.prime

   def mult(self,a,b):
      return (a*b) % self.prime

   def power(self,a,b):
      """Modular exponentiation by repeated squaring."""
      result = 1
      while(b>0):
         b,x = divmod(b,2)
         result = (result * (a if x else 1)) % self.prime
         a = a*a % self.prime
      return result

   def powinv(self,a):
      """ USE ONLY PRIME MODULUS """
      # Fermat's little theorem: a**(p-2) == a**-1 (mod p) for prime p
      return self.power(a,self.prime-2)

   def divide(self,a,b):
      """ USE ONLY PRIME MODULUS """
      baddinv = self.powinv(b)
      return self.mult(a,baddinv)

   def mtrxrmrowcol(self,mtrx,r,c):
      """Minor matrix: remove row r and column c from a square matrix."""
      if not len(mtrx) == len(mtrx[0]):
         LOGERROR('Must be a square matrix!')
         return []
      sz = len(mtrx)
      return [[mtrx[i][j] for j in range(sz) if not j==c] for i in range(sz) if not i==r]


   ################################################################################
   def mtrxdet(self,mtrx):
      """Determinant (mod prime) via cofactor expansion along the first row."""
      if len(mtrx)==1:
         return mtrx[0][0]

      if not len(mtrx) == len(mtrx[0]):
         LOGERROR('Must be a square matrix!')
         return -1

      result = 0;
      for i in range(len(mtrx)):
         mult = mtrx[0][i] * (-1 if i%2==1 else 1)
         subdet = self.mtrxdet(self.mtrxrmrowcol(mtrx,0,i))
         result = self.add(result, self.mult(mult,subdet))
      return result

   ################################################################################
   def mtrxmultvect(self,mtrx, vect):
      """Matrix-vector product (mod prime)."""
      M,N = len(mtrx), len(mtrx[0])
      if not len(mtrx[0])==len(vect):
         LOGERROR('Mtrx and vect are incompatible: %dx%d, %dx1', M, N, len(vect))
      return [ sum([self.mult(mtrx[i][j],vect[j]) for j in range(N)])%self.prime for i in range(M) ]

   ################################################################################
   def mtrxmult(self,m1, m2):
      """Matrix product (mod prime): (M1xN1)*(M2xN2) -> M1xN2; requires N1==M2."""
      M1,N1 = len(m1), len(m1[0])
      M2,N2 = len(m2), len(m2[0])
      if not N1==M2:
         LOGERROR('Matrices are incompatible: %dx%d, %dx%d', M1,N1, M2,N2)
      inner = lambda i,j: sum([self.mult(m1[i][k],m2[k][j]) for k in range(N1)])
      # FIX: result has N2 columns -- was 'range(N1)', wrong for non-square m2
      return [ [inner(i,j)%self.prime for j in range(N2)] for i in range(M1) ]

   ################################################################################
   def mtrxadjoint(self,mtrx):
      """Adjugate: transpose of the cofactor matrix (mod prime)."""
      sz = len(mtrx)
      inner = lambda i,j: self.mtrxdet(self.mtrxrmrowcol(mtrx,i,j))
      return [[((-1 if (i+j)%2==1 else 1)*inner(j,i))%self.prime for j in range(sz)] for i in range(sz)]

   ################################################################################
   def mtrxinv(self,mtrx):
      """Matrix inverse via adjugate/determinant (mod prime)."""
      det = self.mtrxdet(mtrx)
      adj = self.mtrxadjoint(mtrx)
      sz = len(mtrx)
      return [[self.divide(adj[i][j],det) for j in range(sz)] for i in range(sz)]
-
-
-################################################################################
def SplitSecret(secret, needed, pieces, nbytes=None, use_random_x=False):
   """
   Shamir's Secret Sharing: split 'secret' into 'pieces' fragments, any
   'needed' of which suffice to reconstruct it.  Returns a list of [x, y]
   pairs, each encoded as an nbytes big-endian binary string.
   """
   if not isinstance(secret, basestring):
      secret = secret.toBinStr()

   if nbytes==None:
      nbytes = len(secret)

   ff = FiniteField(nbytes)
   fragments = []

   # Convert secret to an integer; it becomes the leading poly coefficient
   a = binary_to_int(SecureBinaryData(secret).toBinStr(),BIGENDIAN)

   # NOTE(review): the two guard clauses below were garbled in this copy of
   # the file; restored from the upstream Armory sources -- verify against it
   if not a < ff.prime:
      LOGERROR('Secret must be less than %s', int_to_hex(ff.prime))
      raise FiniteFieldError

   if not pieces >= needed:
      LOGERROR('You must create more pieces than needed to reconstruct!')
      raise FiniteFieldError

   if needed==1 or needed>8:
      LOGERROR('Can split secrets into parts *requiring* at most 8 fragments')
      LOGERROR('You can break it into as many optional fragments as you want')
      raise FiniteFieldError


   # We deterministically produce the coefficients so that we always use the
   # same polynomial for a given secret
   lasthmac = secret[:]
   othernum = []
   for i in range(pieces+needed-1):
      lasthmac = HMAC512(lasthmac, 'splitsecrets')[:nbytes]
      othernum.append(binary_to_int(lasthmac))

   def poly(x):
      polyout = ff.mult(a, ff.power(x,needed-1))
      for i,e in enumerate(range(needed-2,-1,-1)):
         term = ff.mult(othernum[i], ff.power(x,e))
         polyout = ff.add(polyout, term)
      return polyout

   for i in range(pieces):
      x = othernum[i+2] if use_random_x else i+1
      fragments.append( [x, poly(x)] )

   secret,a = None,None
   fragments = [ [int_to_binary(p, nbytes, BIGENDIAN) for p in frag] for frag in fragments]
   return fragments
-
-
-################################################################################
def ReconstructSecret(fragments, needed, nbytes):
   """
   Recover the shared secret from 'needed' (x, y) fragments by solving for
   the polynomial coefficients; returns the leading coefficient (the secret)
   as an nbytes big-endian binary string.
   """
   ff = FiniteField(nbytes)
   xyPairs = [(binary_to_int(x, BIGENDIAN), binary_to_int(y, BIGENDIAN))
              for x, y in fragments[:needed]]

   # Vandermonde-style matrix of x powers, highest exponent first
   powerMtrx = [[ff.power(x, e) for e in range(needed-1, -1, -1)]
                for x, _ in xyPairs]
   yVect = [y for _, y in xyPairs]

   solved = ff.mtrxmultvect(ff.mtrxinv(powerMtrx), yVect)
   return int_to_binary(solved[0], nbytes, BIGENDIAN)
-
-
-################################################################################
def createTestingSubsets( fragIndices, M, maxTestCount=20):
   """
   Returns (IsRandomized, listOfTuplesOfSizeM)

   Raises KeyDataError if fewer than M fragment indices are supplied.
   """
   from itertools import combinations

   numIdx = len(fragIndices)

   if M>numIdx:
      LOGERROR('Insufficent number of fragments')
      raise KeyDataError
   elif M==numIdx:
      LOGINFO('Fragments supplied == needed. One subset to test (%s-of-N)' % M)
      return ( False, [tuple(fragIndices)] )
   else:
      LOGINFO('Test reconstruct %s-of-N, with %s fragments' % (M, numIdx))

      # Compute the number of possible subsets. This is stable because we
      # shouldn't ever have more than 12 fragments
      fact = math.factorial
      numCombo = fact(numIdx) / ( fact(M) * fact(numIdx-M) )

      if numCombo <= maxTestCount:
         LOGINFO('Testing all %s combinations...' % numCombo)
         # FIX: itertools.combinations yields exactly the M-subsets (in
         # fragIndices order), replacing the old O(2**numIdx) bitset scan
         subs = [tuple(combo) for combo in combinations(fragIndices, M)]
         return (False, sorted(subs))
      else:
         LOGINFO('#Subsets > %s, will need to randomize' % maxTestCount)
         subs = []
         usedSubsets = set()
         while len(subs) < maxTestCount:
            sample = tuple(sorted(random.sample(fragIndices, M)))
            if not sample in usedSubsets:
               usedSubsets.add(sample)
               subs.append(sample)

         return (True, sorted(subs))
-
-
-################################################################################
def testReconstructSecrets(fragMap, M, maxTestCount=20):
   """
   Reconstruct the secret from up to maxTestCount different M-fragment
   subsets of fragMap.  Returns (wasRandomSampling, results) where results
   is a list of (subsetIndices, reconstructedSecret) pairs.
   """
   fragKeys = [k for k in fragMap.iterkeys()]
   isRandom, subsets = createTestingSubsets(fragKeys, M, maxTestCount)
   nBytes = len(fragMap[fragKeys[0]][1])
   LOGINFO('Testing %d-byte fragments' % nBytes)

   testResults = []
   for subset in subsets:
      pieces = [fragMap[idx][:] for idx in subset]
      testResults.append((subset, ReconstructSecret(pieces, M, nBytes)))

   return isRandom, testResults
-
-
-
-
-
-################################################################################
def ComputeFragIDBase58(M, wltIDBin):
   """Fragment-set ID: str(M) + Base58 of hash256(walletID || M-as-4-bytes)[:4]."""
   mBin4 = int_to_binary(M, widthBytes=4, endOut=BIGENDIAN)
   idBin = hash256(wltIDBin + mBin4)[:4]
   return str(M) + binary_to_base58(idBin)
-
-################################################################################
def ComputeFragIDLineHex(M, index, wltIDBin, isSecure=False, addSpaces=False):
   """
   Hex ID line for fragment #index: [M (+0x80 if secure)][index+1][walletID].
   With addSpaces, the hex is grouped into four space-separated 4-char chunks.
   """
   mByte = (128+M) if isSecure else M
   fragID = int_to_hex(mByte) + int_to_hex(index+1) + binary_to_hex(wltIDBin)

   if addSpaces:
      fragID = ' '.join([fragID[4*i:4*i+4] for i in range(4)])

   return fragID
-
-
-################################################################################
def ReadFragIDLineBin(binLine):
   """Parse a binary frag-ID line into (M, fragNum, wltID, doMask, idBase58)."""
   mByte = binary_to_int(binLine[0])
   doMask = mByte > 127       # high bit marks a "secure" (masked) fragment
   M = mByte & 0x7f
   fnum = binary_to_int(binLine[1])
   wltID = binLine[2:]

   idBase58 = ComputeFragIDBase58(M, wltID) + '-#' + str(fnum)
   return (M, fnum, wltID, doMask, idBase58)
-
-
-################################################################################
def ReadFragIDLineHex(hexLine):
   """Hex-string wrapper around ReadFragIDLineBin (whitespace tolerated)."""
   compactHex = hexLine.strip().replace(' ', '')
   return ReadFragIDLineBin(hex_to_binary(compactHex))
-
-
-# END FINITE FIELD OPERATIONS
-################################################################################
-################################################################################
-
-
# We can identify an address string by its first byte upon conversion
# back to binary. Return -1 if checksum doesn't match
def checkAddrType(addrBin):
   """ Gets the network byte of the address. Returns -1 if chksum fails """
   payload, chk4 = addrBin[:-4], addrBin[-4:]
   if hash256(payload)[:4] == chk4:
      return addrBin[0]
   return -1
-
# Check validity of a BTC address in its binary form, as would
# be found inside a pkScript. Usually about 24 bytes
def checkAddrBinValid(addrBin, netbyte=ADDRBYTE):
   """
   Checks whether this address is valid for the given network
   (set at the top of pybtcengine.py)
   """
   return netbyte == checkAddrType(addrBin)
-
# Check validity of a BTC address in Base58 form
def checkAddrStrValid(addrStr):
   """ Check that a Base58 address-string is valid on this network """
   binForm = base58_to_binary(addrStr)
   return checkAddrBinValid(binForm)
-
-
-def convertKeyDataToAddress(privKey=None, pubKey=None):
- if not privKey and not pubKey:
- raise BadAddressError, 'No key data supplied for conversion'
- elif privKey:
- if isinstance(privKey, str):
- privKey = SecureBinaryData(privKey)
-
- if not privKey.getSize()==32:
- raise BadAddressError, 'Invalid private key format!'
- else:
- pubKey = CryptoECDSA().ComputePublicKey(privKey)
-
- if isinstance(pubKey,str):
- pubKey = SecureBinaryData(pubKey)
- return pubKey.getHash160()
-
-
-
-################################################################################
-def decodeMiniPrivateKey(keyStr):
- """
- Converts a 22, 26 or 30-character Base58 mini private key into a
- 32-byte binary private key.
- """
- if not len(keyStr) in (22,26,30):
- return ''
-
- keyQ = keyStr + '?'
- theHash = sha256(keyQ)
-
- if binary_to_hex(theHash[0]) == '01':
- raise KeyDataError, 'PBKDF2-based mini private keys not supported!'
- elif binary_to_hex(theHash[0]) != '00':
- raise KeyDataError, 'Invalid mini private key... double check the entry'
-
- return sha256(keyStr)
-
-
-################################################################################
def parsePrivateKeyData(theStr):
   """
   Classify and decode a user-entered private key string.

   Accepts mini private keys, plain Base58, Base58/hex with a 4-byte
   checksum, and raw hex.  Returns (binaryKey, keyTypeDescription);
   raises BadAddressError, InvalidHashError or CompressedKeyError.
   """
   hexChars = '01234567890abcdef'   # NOTE(review): '0' appears twice; harmless
   b58Chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

   # Count characters drawn from each alphabet to guess the encoding
   hexCount = sum([1 if c in hexChars else 0 for c in theStr.lower()])
   b58Count = sum([1 if c in b58Chars else 0 for c in theStr])
   canBeHex = hexCount==len(theStr)
   canBeB58 = b58Count==len(theStr)

   binEntry = ''
   keyType = ''
   isMini = False
   if canBeB58 and not canBeHex:
      if len(theStr) in (22, 30):
         # Mini-private key format!
         try:
            binEntry = decodeMiniPrivateKey(theStr)
         except KeyDataError:
            raise BadAddressError, 'Invalid mini-private key string'
         keyType = 'Mini Private Key Format'
         isMini = True
      elif len(theStr) in range(48,53):
         binEntry = base58_to_binary(theStr)
         keyType = 'Plain Base58'
      else:
         raise BadAddressError, 'Unrecognized key data'
   elif canBeHex:
      binEntry = hex_to_binary(theStr)
      keyType = 'Plain Hex'
   else:
      raise BadAddressError, 'Unrecognized key data'


   # 36 bytes: raw 32-byte key + 4-byte checksum
   # 37 bytes starting with PRIVKEYBYTE: WIF-style with checksum
   if len(binEntry)==36 or (len(binEntry)==37 and binEntry[0]==PRIVKEYBYTE):
      if len(binEntry)==36:
         keydata = binEntry[:32 ]
         chk = binEntry[ 32:]
         binEntry = verifyChecksum(keydata, chk)
         if not isMini:
            keyType = 'Raw %s with checksum' % keyType.split(' ')[1]
      else:
         # Assume leading 0x80 byte, and 4 byte checksum
         keydata = binEntry[ :1+32 ]
         chk = binEntry[ 1+32:]
         binEntry = verifyChecksum(keydata, chk)
         binEntry = binEntry[1:]
         if not isMini:
            keyType = 'Standard %s key with checksum' % keyType.split(' ')[1]

      # verifyChecksum returns '' when the checksum cannot be repaired
      if binEntry=='':
         raise InvalidHashError, 'Private Key checksum failed!'
   elif (len(binEntry)==33 and binEntry[-1]=='\x01') or \
        (len(binEntry)==37 and binEntry[-5]=='\x01'):
      # Trailing 0x01 marks a compressed-pubkey WIF key -- unsupported
      raise CompressedKeyError, 'Compressed Public keys not supported!'
   return binEntry, keyType
-
-
-
-################################################################################
def encodePrivKeyBase58(privKeyBin, leadByte=PRIVKEYBYTE):
   """Serialize a raw private key as Base58 with network byte and checksum."""
   withNetByte = leadByte + privKeyBin
   return binary_to_base58(withNetByte + computeChecksum(withNetByte))
-
-
-
-URI_VERSION_STR = '1.0'
-
-################################################################################
def parseBitcoinURI(theStr):
   """
   Takes a URI string, returns the pieces of it, in a dictionary.
   Returns {} for anything that is not a well-formed bitcoin URI.
   """
   # Start by splitting it into pieces on any separator
   for c in ':;?&':
      theStr = theStr.replace(c, ' ')
   parts = theStr.split()

   # FIX: guard against empty input (used to raise IndexError on parts[0])
   if len(parts)==0 or not parts[0] == 'bitcoin':
      return {}

   uriData = {}

   try:
      uriData['address'] = parts[1]
      for p in parts[2:]:
         # All fields must be "key=value"; anything else invalidates the URI
         if not '=' in p:
            return {}

         # FIX: split on the FIRST '=' only, so values may contain '='
         key, value = p.split('=', 1)

         if key.lower()=='amount':
            uriData['amount'] = str2coin(value)
         elif key.lower() in ('label','message'):
            uriData[key] = uriPercentToReserved(value)
         else:
            uriData[key] = value
   except Exception:
      # FIX: was a bare 'except:' (also swallowed KeyboardInterrupt/SystemExit);
      # best-effort parse still rejects the whole URI on any malformed field
      return {}

   return uriData
-
-
-################################################################################
def uriReservedToPercent(theStr):
   """
   Convert from a regular string to a percent-encoded string
   """
   # '%' must be the first character processed, otherwise the '%' signs
   # introduced by earlier replacements would themselves get re-escaped
   reserved = "%!*'();:@&=+$,/?#[] "
   for ch in reserved:
      theStr = theStr.replace(ch, '%%%s' % int_to_hex(ord(ch)))
   return theStr
-
-
-################################################################################
def uriPercentToReserved(theStr):
   """
   Decode a percent-encoded string back to its literal characters.
   (This replacement direction is much easier!)
   """
   chunks = theStr.split('%')
   decoded = chunks[0]
   # Each subsequent chunk begins with the two hex digits of an escaped char
   for chunk in chunks[1:]:
      decoded += chr(hex_to_int(chunk[:2])) + chunk[2:]
   return decoded
-
-
-################################################################################
def createBitcoinURI(addr, amt=None, msg=None):
   """Assemble a 'bitcoin:' URI with optional amount and percent-encoded label."""
   query = []
   if amt:
      query.append('amount=%s' % coin2str(amt, maxZeros=0).strip())
   if msg:
      query.append('label=%s' % uriReservedToPercent(msg))

   if query:
      return 'bitcoin:%s?%s' % (addr, '&'.join(query))
   return 'bitcoin:%s' % addr
-
################################################################################
def createSigScript(rBin, sBin):
   # DER-encode an ECDSA (r,s) pair: 0x30 [len] 0x02 [rlen] r 0x02 [slen] s
   # Remove all leading zero-bytes
   while rBin[0]=='\x00':
      rBin = rBin[1:]
   while sBin[0]=='\x00':
      sBin = sBin[1:]

   # DER integers are signed: prepend 0x00 when the high bit is set
   if binary_to_int(rBin[0])&128>0: rBin = '\x00'+rBin
   if binary_to_int(sBin[0])&128>0: sBin = '\x00'+sBin
   rSize = int_to_binary(len(rBin))
   sSize = int_to_binary(len(sBin))
   rsSize = int_to_binary(len(rBin) + len(sBin) + 4)
   # NOTE(review): an all-zero r or s would exhaust the strip loops and then
   # raise IndexError on [0] -- assumed unreachable for valid signatures
   sigScript = '\x30' + rsSize + \
               '\x02' + rSize + rBin + \
               '\x02' + sSize + sBin
   return sigScript
-
-################################################################################
-class PyBtcAddress(object):
- """
- PyBtcAddress --
-
- This class encapsulated EVERY kind of address object:
- -- Plaintext private-key-bearing addresses
- -- Encrypted private key addresses, with AES locking and unlocking
- -- Watching-only public-key addresses
- -- Address-only storage, representing someone else's key
- -- Deterministic address generation from previous addresses
- -- Serialization and unserialization of key data under all conditions
- -- Checksums on all serialized fields to protect against HDD byte errors
-
- For deterministic wallets, new addresses will be created from a chaincode
- and the previous address. What is implemented here is a special kind of
- deterministic calculation that actually allows the user to securely
- generate new addresses even if they don't have the private key. This
- method uses Diffie-Hellman shared-secret calculations to produce the new
- keys, and has the same level of security as all other ECDSA operations.
- There's a lot of fantastic benefits to doing this:
-
- (1) If all addresses in wallet are chained, then you only need to backup
- your wallet ONCE -- when you first create it. Print it out, put it
- in a safety-deposit box, or tattoo the generator key to the inside
- of your eyelid: it will never change.
-
- (2) You can keep your private keys on an offline machine, and keep a
- watching-only wallet online. You will be able to generate new
- keys/addresses, and verify incoming transactions, without ever
- requiring your private key to touch the internet.
-
- (3) If your friend has the chaincode and your first public key, they
- too can generate new addresses for you -- allowing them to send
- you money multiple times, with different addresses, without ever
- needing to specifically request the addresses.
- (the downside to this is if the chaincode is compromised, all
- chained addresses become de-anonymized -- but is only a loss of
- privacy, not security)
-
- However, we do require some fairly complicated logic, due to the fact
- that a user with a full, private-key-bearing wallet, may try to generate
- a new key/address without supplying a passphrase. If this happens, the
- wallet logic gets very complicated -- we don't want to reject the request
- to generate a new address, but we can't compute the private key until the
- next time the user unlocks their wallet. Thus, we have to save off the
- data they will need to create the key, to be applied on next unlock.
- """
- #############################################################################
   def __init__(self):
      """
      We use SecureBinaryData objects to store pub, priv and IV objects,
      because that is what is required by the C++ code. See EncryptionUtils.h
      to see that available methods.
      """
      self.addrStr20 = ''
      self.binPublicKey65 = SecureBinaryData() # 0x04 X(BE) Y(BE)
      self.binPrivKey32_Encr = SecureBinaryData() # BIG-ENDIAN
      self.binPrivKey32_Plain = SecureBinaryData()
      self.binInitVect16 = SecureBinaryData()
      self.isLocked = False
      self.useEncryption = False
      # NOTE(review): this attribute shadows the isInitialized() method below
      self.isInitialized = False
      self.keyChanged = False # ...since last key encryption
      self.walletByteLoc = -1
      self.chaincode = SecureBinaryData()
      self.chainIndex = 0

      # Information to be used by C++ to know where to search for transactions
      # in the blockchain (disabled in favor of a better search method)
      # Ranges are [first, last]; 2**32-1 lower bound means "never seen"
      self.timeRange = [2**32-1, 0]
      self.blkRange = [2**32-1, 0]

      # This feels like a hack, but it's the only way I can think to handle
      # the case of generating new, chained addresses, even without the
      # private key currently in memory. i.e. - If we can't unlock the priv
      # key when creating a new chained priv key, we will simply extend the
      # public key, and store the last-known chain info, so that it can be
      # generated the next time the address is unlocked
      self.createPrivKeyNextUnlock = False
      self.createPrivKeyNextUnlock_IVandKey = [None, None] # (IV,Key)
      self.createPrivKeyNextUnlock_ChainDepth = -1
-
- #############################################################################
   def isInitialized(self):
      """ Keep track of whether this address has been initialized """
      # NOTE(review): __init__ assigns self.isInitialized = False, which
      # shadows this method on every instance -- it is effectively dead code
      return self.isInitialized
-
- #############################################################################
- def hasPrivKey(self):
- """
- We have a private key if either the plaintext, or ciphertext private-key
- fields are non-empty. We also consider ourselves to "have" the private
- key if this address was chained from a key that has the private key, even
- if we haven't computed it yet (due to not having unlocked the private key
- before creating the new address).
- """
- return (self.binPrivKey32_Encr.getSize() != 0 or \
- self.binPrivKey32_Plain.getSize() != 0 or \
- self.createPrivKeyNextUnlock)
-
- #############################################################################
- def hasPubKey(self):
- return (self.binPublicKey65.getSize() != 0)
-
- #############################################################################
- def getAddrStr(self, netbyte=ADDRBYTE):
- chksum = hash256(netbyte + self.addrStr20)[:4]
- return binary_to_base58(netbyte + self.addrStr20 + chksum)
-
- #############################################################################
- def getAddr160(self):
- if len(self.addrStr20)!=20:
- raise KeyDataError, 'PyBtcAddress does not have an address string!'
- return self.addrStr20
-
-
- #############################################################################
   def isCompressed(self):
      """Always False: this address class never holds compressed keys."""
      # Armory wallets (v1.35) do not support compressed keys
      return False
-
-
- #############################################################################
   def touch(self, unixTime=None, blkNum=None):
      """
      Just like "touching" a file, this makes sure that the firstSeen and
      lastSeen fields for this address are updated to include "now"

      If we include only a block number, we will fill in the timestamp with
      the unix-time for that block (if the BlockDataManager is availabled)
      """
      # A zero lower bound means "never seen"; reset it to the +inf sentinel
      # so the min() updates below behave
      if self.blkRange[0]==0:
         self.blkRange[0]=2**32-1
      if self.timeRange[0]==0:
         self.timeRange[0]=2**32-1

      if blkNum==None:
         if TheBDM.getBDMState()=='BlockchainReady':
            topBlk = TheBDM.getTopBlockHeight()
            self.blkRange[0] = long(min(self.blkRange[0], topBlk))
            self.blkRange[1] = long(max(self.blkRange[1], topBlk))
      else:
         self.blkRange[0] = long(min(self.blkRange[0], blkNum))
         self.blkRange[1] = long(max(self.blkRange[1], blkNum))

      # NOTE(review): when blkNum is None this calls getHeaderByHeight(None);
      # presumably callers pass blkNum whenever unixTime is None -- verify
      if unixTime==None and TheBDM.getBDMState()=='BlockchainReady':
         unixTime = TheBDM.getHeaderByHeight(blkNum).getTimestamp()

      if unixTime==None:
         unixTime = RightNow()

      self.timeRange[0] = long(min(self.timeRange[0], unixTime))
      self.timeRange[1] = long(max(self.timeRange[1], unixTime))
-
-
-
- #############################################################################
   def copy(self):
      # Clone via serialize/unserialize round-trip, then explicitly copy the
      # key material and state flags not carried by the serialized form
      newAddr = PyBtcAddress().unserialize(self.serialize())
      newAddr.binPrivKey32_Plain = self.binPrivKey32_Plain.copy()
      newAddr.binPrivKey32_Encr = self.binPrivKey32_Encr.copy()
      newAddr.binPublicKey65 = self.binPublicKey65.copy()
      newAddr.binInitVect16 = self.binInitVect16.copy()
      newAddr.isLocked = self.isLocked
      newAddr.useEncryption = self.useEncryption
      newAddr.isInitialized = self.isInitialized
      newAddr.keyChanged = self.keyChanged
      newAddr.walletByteLoc = self.walletByteLoc
      # NOTE(review): chaincode is a shared reference here, not a .copy()
      newAddr.chaincode = self.chaincode
      newAddr.chainIndex = self.chainIndex
      return newAddr
-
-
-
- #############################################################################
   def getTimeRange(self):
      # [firstSeen, lastSeen] unix times (2**32-1 lower bound = never seen)
      return self.timeRange

   #############################################################################
   def getBlockRange(self):
      # [firstSeen, lastSeen] block heights
      return self.blkRange

   #############################################################################
   def serializePublicKey(self):
      """Converts the SecureBinaryData public key to a 65-byte python string"""
      return self.binPublicKey65.toBinStr()

   #############################################################################
   def serializeEncryptedPrivateKey(self):
      """Converts SecureBinaryData encrypted private key to python string"""
      return self.binPrivKey32_Encr.toBinStr()

   #############################################################################
   # NOTE: This method should rarely be used, unless we are only printing it
   # to the screen. Actually, it will be used for unencrypted wallets
   def serializePlainPrivateKey(self):
      return self.binPrivKey32_Plain.toBinStr()

   def serializeInitVector(self):
      # The 16-byte AES initialization vector as a plain string
      return self.binInitVect16.toBinStr()
-
-
- #############################################################################
   def verifyEncryptionKey(self, secureKdfOutput):
      """
      Determine if this data is the decryption key for this encrypted address
      """
      if not self.useEncryption or not self.hasPrivKey():
         return False

      if self.useEncryption and not secureKdfOutput:
         LOGERROR('No encryption key supplied to verifyEncryption!')
         return False


      decryptedKey = CryptoAES().DecryptCFB( self.binPrivKey32_Encr, \
                                             SecureBinaryData(secureKdfOutput), \
                                             self.binInitVect16)
      verified = False

      # Unlocked: compare directly with the plaintext key.  Locked: verify
      # via the public key, computing (and caching) it if we don't have one.
      if not self.isLocked:
         if decryptedKey==self.binPrivKey32_Plain:
            verified = True
      else:
         computedPubKey = CryptoECDSA().ComputePublicKey(decryptedKey)
         if self.hasPubKey():
            verified = (self.binPublicKey65==computedPubKey)
         else:
            # NOTE(review): the computed pubkey is cached even if the hash160
            # comparison below fails -- confirm this is intended
            self.binPublicKey65 = computedPubKey
            verified = (computedPubKey.getHash160()==self.addrStr20)

      # Scrub the decrypted key from memory regardless of outcome
      decryptedKey.destroy()
      return verified
-
-
-
- #############################################################################
- def setInitializationVector(self, IV16=None, random=False, force=False):
- """
- Either set the IV through input arg, or explicitly call random=True
- Returns the IV -- which is especially important if it is randomly gen
-
- This method is mainly for PREVENTING you from changing an existing IV
- without meaning to. Losing the IV for encrypted data is almost as bad
- as losing the encryption key. Caller must use force=True in order to
- override this warning -- otherwise this method will abort.
- """
- if self.binInitVect16.getSize()==16:
- if self.isLocked:
- LOGERROR('Address already locked with different IV.')
- LOGERROR('Changing IV may cause loss of keydata.')
- else:
- LOGERROR('Address already contains an initialization')
- LOGERROR('vector. If you change IV without updating')
- LOGERROR('the encrypted storage, you may permanently')
- LOGERROR('lose the encrypted data')
-
- if not force:
- LOGERROR('If you really want to do this, re-execute this call with force=True')
- return ''
-
- if IV16:
- self.binInitVect16 = SecureBinaryData(IV16)
- elif random==True:
- self.binInitVect16 = SecureBinaryData().GenerateRandom(16)
- else:
- raise KeyDataError, 'setInitVector: set IV data, or random=True'
- return self.binInitVect16
-
- #############################################################################
- def enableKeyEncryption(self, IV16=None, generateIVIfNecessary=False):
- """
- setIV method will raise error is we don't specify any args, but it is
- acceptable HERE to not specify any args just to enable encryption
- """
- self.useEncryption = True
- if IV16:
- self.setInitializationVector(IV16)
- elif generateIVIfNecessary and self.binInitVect16.getSize()<16:
- self.setInitializationVector(random=True)
-
-
- #############################################################################
- def isKeyEncryptionEnabled(self):
- return self.useEncryption
-
-
- #############################################################################
- def createFromEncryptedKeyData(self, addr20, encrPrivKey32, IV16, \
- chkSum=None, pubKey=None):
- # We expect both private key and IV to the right size
- assert(encrPrivKey32.getSize()==32)
- assert(IV16.getSize()==16)
- self.__init__()
- self.addrStr20 = addr20
- self.binPrivKey32_Encr = SecureBinaryData(encrPrivKey32)
- self.setInitializationVector(IV16)
- self.isLocked = True
- self.useEncryption = True
- self.isInitialized = True
- if chkSum and not self.binPrivKey32_Encr.getHash256().startswith(chkSum):
- raise ChecksumError, "Checksum doesn't match encrypted priv key data!"
- if pubKey:
- self.binPublicKey65 = SecureBinaryData(pubKey)
- if not self.binPublicKey65.getHash160()==self.addrStr20:
- raise KeyDataError, "Public key does not match supplied address"
-
- return self
-
-
- #############################################################################
- def createFromPlainKeyData(self, plainPrivKey, addr160=None, willBeEncr=False, \
- generateIVIfNecessary=False, IV16=None, \
- chksum=None, publicKey65=None, \
- skipCheck=False, skipPubCompute=False):
-
- assert(plainPrivKey.getSize()==32)
-
- if not addr160:
- addr160 = convertKeyDataToAddress(privKey=plainPrivKey)
-
- self.__init__()
- self.addrStr20 = addr160
- self.isInitialized = True
- self.binPrivKey32_Plain = SecureBinaryData(plainPrivKey)
- self.isLocked = False
-
- if willBeEncr:
- self.enableKeyEncryption(IV16, generateIVIfNecessary)
- elif IV16:
- self.binInitVect16 = IV16
-
- if chksum and not verifyChecksum(self.binPrivKey32_Plain.toBinStr(), chksum):
- raise ChecksumError, "Checksum doesn't match plaintext priv key!"
- if publicKey65:
- self.binPublicKey65 = SecureBinaryData(publicKey65)
- if not self.binPublicKey65.getHash160()==self.addrStr20:
- raise KeyDataError, "Public key does not match supplied address"
- if not skipCheck:
- if not CryptoECDSA().CheckPubPrivKeyMatch(self.binPrivKey32_Plain,\
- self.binPublicKey65):
- raise KeyDataError, 'Supplied pub and priv key do not match!'
- elif not skipPubCompute:
- # No public key supplied, but we do want to calculate it
- self.binPublicKey65 = CryptoECDSA().ComputePublicKey(plainPrivKey)
-
- return self
-
- #############################################################################
- def createFromPublicKeyData(self, publicKey65, chksum=None):
-
- assert(publicKey65.getSize()==65)
- self.__init__()
- self.addrStr20 = publicKey65.getHash160()
- self.binPublicKey65 = publicKey65
- self.isInitialized = True
- self.isLocked = False
- self.useEncryption = False
-
- if chksum and not verifyChecksum(self.binPublicKey65.toBinStr(), chksum):
- raise ChecksumError, "Checksum doesn't match supplied public key!"
-
- return self
-
-
- #############################################################################
- def lock(self, secureKdfOutput=None, generateIVIfNecessary=False):
- # We don't want to destroy the private key if it's not supposed to be
- # encrypted. Similarly, if we haven't actually saved the encrypted
- # version, let's not lock it
- newIV = False
- if not self.useEncryption or not self.hasPrivKey():
- # This isn't supposed to be encrypted, or there's no privkey to encrypt
- return
- else:
- if self.binPrivKey32_Encr.getSize()==32 and not self.keyChanged:
- # Addr should be encrypted, and we already have encrypted priv key
- self.binPrivKey32_Plain.destroy()
- self.isLocked = True
- else:
- # Addr should be encrypted, but haven't computed encrypted value yet
- if secureKdfOutput!=None:
- # We have an encryption key, use it
- if self.binInitVect16.getSize() < 16:
- if not generateIVIfNecessary:
- raise KeyDataError, 'No Initialization Vector available'
- else:
- self.binInitVect16 = SecureBinaryData().GenerateRandom(16)
- newIV = True
-
- # Finally execute the encryption
- self.binPrivKey32_Encr = CryptoAES().EncryptCFB( \
- self.binPrivKey32_Plain, \
- SecureBinaryData(secureKdfOutput), \
- self.binInitVect16)
- # Destroy the unencrypted key, reset the keyChanged flag
- self.binPrivKey32_Plain.destroy()
- self.isLocked = True
- self.keyChanged = False
- else:
- # Can't encrypt the addr because we don't have encryption key
- raise WalletLockError, ("\n\tTrying to destroy plaintext key, but no"
- "\n\tencrypted key data is available, and no"
- "\n\tencryption key provided to encrypt it.")
-
-
- # In case we changed the IV, we should let the caller know this
- return self.binInitVect16 if newIV else SecureBinaryData()
-
-
- #############################################################################
- def unlock(self, secureKdfOutput, skipCheck=False):
- """
- This method knows nothing about a key-derivation function. It simply
- takes in an AES key and applies it to decrypt the data. However, it's
- best if that AES key is actually derived from "heavy" key-derivation
- function.
- """
- if not self.useEncryption or not self.isLocked:
- # Bail out if the wallet is unencrypted, or already unlocked
- self.isLocked = False
- return
-
-
- if self.createPrivKeyNextUnlock:
- # This is SPECIFICALLY for the case that we didn't have the encr key
- # available when we tried to extend our deterministic wallet, and
- # generated a new address anyway
- self.binPrivKey32_Plain = CryptoAES().DecryptCFB( \
- self.createPrivKeyNextUnlock_IVandKey[1], \
- SecureBinaryData(secureKdfOutput), \
- self.createPrivKeyNextUnlock_IVandKey[0])
-
- for i in range(self.createPrivKeyNextUnlock_ChainDepth):
- self.binPrivKey32_Plain = CryptoECDSA().ComputeChainedPrivateKey( \
- self.binPrivKey32_Plain, \
- self.chaincode)
-
-
- # IV should have already been randomly generated, before
- self.isLocked = False
- self.createPrivKeyNextUnlock = False
- self.createPrivKeyNextUnlock_IVandKey = []
- self.createPrivKeyNextUnlock_ChainDepth = 0
-
- # Lock/Unlock to make sure encrypted private key is filled
- self.lock(secureKdfOutput,generateIVIfNecessary=True)
- self.unlock(secureKdfOutput)
-
- else:
-
- if not self.binPrivKey32_Encr.getSize()==32:
- raise WalletLockError, 'No encrypted private key to decrypt!'
-
- if not self.binInitVect16.getSize()==16:
- raise WalletLockError, 'Initialization Vect (IV) is missing!'
-
- self.binPrivKey32_Plain = CryptoAES().DecryptCFB( \
- self.binPrivKey32_Encr, \
- secureKdfOutput, \
- self.binInitVect16)
-
- self.isLocked = False
-
- if not skipCheck:
- if not self.hasPubKey():
- self.binPublicKey65 = CryptoECDSA().ComputePublicKey(\
- self.binPrivKey32_Plain)
- else:
- # We should usually check that keys match, but may choose to skip
- # if we have a lot of keys to load
- # NOTE: I run into this error if I fill the keypool without first
- # unlocking the wallet. I'm not sure why it doesn't work
- # when locked (it should), but this wallet format has been
- # working flawless for almost a year... and will be replaced
- # soon, so I won't sweat it.
- if not CryptoECDSA().CheckPubPrivKeyMatch(self.binPrivKey32_Plain, \
- self.binPublicKey65):
- raise KeyDataError, "Stored public key does not match priv key!"
-
-
-
- #############################################################################
- def changeEncryptionKey(self, secureOldKey, secureNewKey):
- """
- We will use None to specify "no encryption", either for old or new. Of
- course we throw an error is old key is "None" but the address is actually
- encrypted.
- """
- if not self.hasPrivKey():
- raise KeyDataError, 'No private key available to re-encrypt'
-
- if not secureOldKey and self.useEncryption and self.isLocked:
- raise WalletLockError, 'Need old encryption key to unlock private keys'
-
- wasLocked = self.isLocked
-
- # Decrypt the original key
- if self.isLocked:
- self.unlock(secureOldKey, skipCheck=False)
-
- # Keep the old IV if we are changing the key. IV reuse is perfectly
- # fine for a new key, and might save us from disaster if we otherwise
- # generated a new one and then forgot to take note of it.
- self.keyChanged = True
- if not secureNewKey:
- # If we chose not to re-encrypt, make sure we clear the encryption
- self.binInitVect16 = SecureBinaryData()
- self.binPrivKey32_Encr = SecureBinaryData()
- self.isLocked = False
- self.useEncryption = False
- else:
- # Re-encrypt with new key (using same IV)
- self.useEncryption = True
- self.lock(secureNewKey) # do this to make sure privKey_Encr filled
- if wasLocked:
- self.isLocked = True
- else:
- self.unlock(secureNewKey)
- self.isLocked = False
-
-
-
-
- #############################################################################
- # This is more of a static method
- def checkPubPrivKeyMatch(self, securePriv, securePub):
- CryptoECDSA().CheckPubPrivKeyMatch(securePriv, securePub)
-
- #############################################################################
- def generateDERSignature(self, binMsg, secureKdfOutput=None):
- """
- This generates a DER signature for this address using the private key.
- Obviously, if we don't have the private key, we throw an error. Or if
- the wallet is locked and no encryption key was provided.
-
- If an encryption key IS provided, then we unlock the address just long
- enough to sign the message and then re-lock it
- """
-
- TimerStart('generateDERSignature')
-
- if not self.hasPrivKey():
- raise KeyDataError, 'Cannot sign for address without private key!'
-
- if self.isLocked:
- if secureKdfOutput==None:
- raise WalletLockError, "Cannot sign Tx when private key is locked!"
- else:
- # Wallet is locked but we have a decryption key
- self.unlock(secureKdfOutput, skipCheck=False)
-
- try:
- secureMsg = SecureBinaryData(binMsg)
- sig = CryptoECDSA().SignData(secureMsg, self.binPrivKey32_Plain)
- sigstr = sig.toBinStr()
- # We add an extra 0 byte to the beginning of each value to guarantee
- # that they are interpretted as unsigned integers. Not always necessary
- # but it doesn't hurt to always do it.
- rBin = sigstr[:32 ]
- sBin = sigstr[ 32:]
- return createSigScript(rBin, sBin)
- except:
- LOGERROR('Failed signature generation')
- finally:
- # Always re-lock/cleanup after unlocking, even after an exception.
- # If locking triggers an error too, we will just skip it.
- TimerStop('generateDERSignature')
- try:
- if secureKdfOutput!=None:
- self.lock(secureKdfOutput)
- except:
- LOGERROR('Error re-locking address')
- pass
-
-
-
-
- #############################################################################
- def verifyDERSignature(self, binMsgVerify, derSig):
-
- TimerStart('verifyDERSignature')
- if not self.hasPubKey():
- raise KeyDataError, 'No public key available for this address!'
-
- if not isinstance(derSig, str):
- # In case this is a SecureBinaryData object...
- derSig = derSig.toBinStr()
-
- codeByte = derSig[0]
- nBytes = binary_to_int(derSig[1])
- rsStr = derSig[2:2+nBytes]
- assert(codeByte == '\x30')
- assert(nBytes == len(rsStr))
- # Read r
- codeByte = rsStr[0]
- rBytes = binary_to_int(rsStr[1])
- r = rsStr[2:2+rBytes]
- assert(codeByte == '\x02')
- sStr = rsStr[2+rBytes:]
- # Read s
- codeByte = sStr[0]
- sBytes = binary_to_int(sStr[1])
- s = sStr[2:2+sBytes]
- assert(codeByte == '\x02')
- # Now we have the (r,s) values of the
-
- secMsg = SecureBinaryData(binMsgVerify)
- secSig = SecureBinaryData(r[-32:] + s[-32:])
- secPubKey = SecureBinaryData(self.binPublicKey65)
- TimerStop('verifyDERSignature')
- return CryptoECDSA().VerifyData(secMsg, secSig, secPubKey)
-
-
- #############################################################################
- def markAsRootAddr(self, chaincode):
- if not chaincode.getSize()==32:
- raise KeyDataError, 'Chaincode must be 32 bytes'
- else:
- self.chainIndex = -1
- self.chaincode = chaincode
-
-
- #############################################################################
- def isAddrChainRoot(self):
- return (self.chainIndex==-1)
-
- #############################################################################
- def extendAddressChain(self, secureKdfOutput=None, newIV=None):
- """
- We require some fairly complicated logic here, due to the fact that a
- user with a full, private-key-bearing wallet, may try to generate a new
- key/address without supplying a passphrase. If this happens, the wallet
- logic gets mucked up -- we don't want to reject the request to
- generate a new address, but we can't compute the private key until the
- next time the user unlocks their wallet. Thus, we have to save off the
- data they will need to create the key, to be applied on next unlock.
- """
- LOGDEBUG('Extending address chain')
- TimerStart('extendAddressChain')
- if not self.chaincode.getSize() == 32:
- raise KeyDataError, 'No chaincode has been defined to extend chain'
-
- newAddr = PyBtcAddress()
- privKeyAvailButNotDecryptable = (self.hasPrivKey() and \
- self.isLocked and \
- not secureKdfOutput )
-
-
- if self.hasPrivKey() and not privKeyAvailButNotDecryptable:
- # We are extending a chain using private key data
- wasLocked = self.isLocked
- if self.useEncryption and self.isLocked:
- if not secureKdfOutput:
- raise WalletLockError, 'Cannot create new address without passphrase'
- self.unlock(secureKdfOutput)
- if not newIV:
- newIV = SecureBinaryData().GenerateRandom(16)
-
- if self.hasPubKey():
- newPriv = CryptoECDSA().ComputeChainedPrivateKey( \
- self.binPrivKey32_Plain, \
- self.chaincode, \
- self.binPublicKey65)
- else:
- newPriv = CryptoECDSA().ComputeChainedPrivateKey( \
- self.binPrivKey32_Plain, \
- self.chaincode)
- newPub = CryptoECDSA().ComputePublicKey(newPriv)
- newAddr160 = newPub.getHash160()
- newAddr.createFromPlainKeyData(newPriv, newAddr160, \
- IV16=newIV, publicKey65=newPub)
-
- newAddr.addrStr20 = newPub.getHash160()
- newAddr.useEncryption = self.useEncryption
- newAddr.isInitialized = True
- newAddr.chaincode = self.chaincode
- newAddr.chainIndex = self.chainIndex+1
-
- # We can't get here without a secureKdfOutput (I think)
- if newAddr.useEncryption:
- newAddr.lock(secureKdfOutput)
- if not wasLocked:
- newAddr.unlock(secureKdfOutput)
- self.unlock(secureKdfOutput)
- TimerStop('extendAddressChain')
- return newAddr
- else:
- # We are extending the address based solely on its public key
- if not self.hasPubKey():
- raise KeyDataError, 'No public key available to extend chain'
- newAddr.binPublicKey65 = CryptoECDSA().ComputeChainedPublicKey( \
- self.binPublicKey65, self.chaincode)
- newAddr.addrStr20 = newAddr.binPublicKey65.getHash160()
- newAddr.useEncryption = self.useEncryption
- newAddr.isInitialized = True
- newAddr.chaincode = self.chaincode
- newAddr.chainIndex = self.chainIndex+1
-
-
- if privKeyAvailButNotDecryptable:
- # *** store what is needed to recover key on next addr unlock ***
- newAddr.isLocked = True
- newAddr.useEncryption = True
- if not newIV:
- newIV = SecureBinaryData().GenerateRandom(16)
- newAddr.binInitVect16 = newIV
- newAddr.createPrivKeyNextUnlock = True
- newAddr.createPrivKeyNextUnlock_IVandKey = [None,None]
- if self.createPrivKeyNextUnlock:
- # We are chaining from address also requiring gen on next unlock
- newAddr.createPrivKeyNextUnlock_IVandKey[0] = \
- self.createPrivKeyNextUnlock_IVandKey[0].copy()
- newAddr.createPrivKeyNextUnlock_IVandKey[1] = \
- self.createPrivKeyNextUnlock_IVandKey[1].copy()
- newAddr.createPrivKeyNextUnlock_ChainDepth = \
- self.createPrivKeyNextUnlock_ChainDepth+1
- else:
- # The address from which we are extending has already been generated
- newAddr.createPrivKeyNextUnlock_IVandKey[0] = self.binInitVect16.copy()
- newAddr.createPrivKeyNextUnlock_IVandKey[1] = self.binPrivKey32_Encr.copy()
- newAddr.createPrivKeyNextUnlock_ChainDepth = 1
- TimerStop('extendAddressChain')
- return newAddr
-
-
- def serialize(self):
- """
- We define here a binary serialization scheme that will write out ALL
- information needed to completely reconstruct address data from file.
- This method returns a string, but presumably will be used to write addr
- data to file. The following format is used.
-
- Address160 (20 bytes) : The 20-byte hash of the public key
- This must always be the first field
- AddressChk ( 4 bytes) : Checksum to make sure no error in addr160
- AddrVersion ( 4 bytes) : Early version don't specify encrypt params
- Flags ( 8 bytes) : Addr-specific info, including encrypt params
-
- ChainCode (32 bytes) : For extending deterministic wallets
- ChainChk ( 4 bytes) : Checksum for chaincode
- ChainIndex ( 8 bytes) : Index in chain if deterministic addresses
- ChainDepth ( 8 bytes) : How deep addr is in chain beyond last
- computed private key (if base address was
- locked when we tried to extend/chain it)
-
- InitVect (16 bytes) : Initialization vector for encryption
- InitVectChk ( 4 bytes) : Checksum for IV
- PrivKey (32 bytes) : Private key data (may be encrypted)
- PrivKeyChk ( 4 bytes) : Checksum for private key data
-
- PublicKey (65 bytes) : Public key for this address
- PubKeyChk ( 4 bytes) : Checksum for private key data
-
-
- FirstTime ( 8 bytes) : The first time addr was seen in blockchain
- LastTime ( 8 bytes) : The last time addr was seen in blockchain
- FirstBlock ( 4 bytes) : The first block addr was seen in blockchain
- LastBlock ( 4 bytes) : The last block addr was seen in blockchain
- """
-
- serializeWithEncryption = self.useEncryption
-
- if self.useEncryption and \
- self.binPrivKey32_Encr.getSize()==0 and \
- self.binPrivKey32_Plain.getSize()>0:
- LOGERROR('')
- LOGERROR('***WARNING: you have chosen to serialize a key you hope to be')
- LOGERROR(' encrypted, but have not yet chosen a passphrase for')
- LOGERROR(' it. The only way to serialize this address is with ')
- LOGERROR(' the plaintext keys. Please lock this address at')
- LOGERROR(' least once in order to enable encrypted output.')
- serializeWithEncryption = False
-
- # Before starting, let's construct the flags for this address
- nFlagBytes = 8
- flags = [False]*nFlagBytes*8
- flags[0] = self.hasPrivKey()
- flags[1] = self.hasPubKey()
- flags[2] = serializeWithEncryption
- flags[3] = self.createPrivKeyNextUnlock
- flags = ''.join([('1' if f else '0') for f in flags])
-
- def raw(a):
- if isinstance(a, str):
- return a
- else:
- return a.toBinStr()
-
- def chk(a):
- if isinstance(a, str):
- return computeChecksum(a,4)
- else:
- return computeChecksum(a.toBinStr(),4)
-
- # Use BinaryPacker "width" fields to guaranteee BINARY_CHUNK width.
- # Sure, if we have malformed data we might cut some of it off instead
- # of writing it to the binary stream. But at least we'll ALWAYS be
- # able to determine where each field is, and will never corrupt the
- # whole wallet so badly we have to go hex-diving to figure out what
- # happened.
- binOut = BinaryPacker()
- binOut.put(BINARY_CHUNK, self.addrStr20, width=20)
- binOut.put(BINARY_CHUNK, chk(self.addrStr20), width= 4)
- binOut.put(UINT32, getVersionInt(PYBTCWALLET_VERSION))
- binOut.put(UINT64, bitset_to_int(flags))
-
- # Write out address-chaining parameters (for deterministic wallets)
- binOut.put(BINARY_CHUNK, raw(self.chaincode), width=32)
- binOut.put(BINARY_CHUNK, chk(self.chaincode), width= 4)
- binOut.put(INT64, self.chainIndex)
- binOut.put(INT64, self.createPrivKeyNextUnlock_ChainDepth)
-
- # Write out whatever is appropriate for private-key data
- # Binary-unpacker will write all 0x00 bytes if empty values are given
- if serializeWithEncryption:
- if self.createPrivKeyNextUnlock:
- binOut.put(BINARY_CHUNK, raw(self.createPrivKeyNextUnlock_IVandKey[0]), width=16)
- binOut.put(BINARY_CHUNK, chk(self.createPrivKeyNextUnlock_IVandKey[0]), width= 4)
- binOut.put(BINARY_CHUNK, raw(self.createPrivKeyNextUnlock_IVandKey[1]), width=32)
- binOut.put(BINARY_CHUNK, chk(self.createPrivKeyNextUnlock_IVandKey[1]), width= 4)
- else:
- binOut.put(BINARY_CHUNK, raw(self.binInitVect16), width=16)
- binOut.put(BINARY_CHUNK, chk(self.binInitVect16), width= 4)
- binOut.put(BINARY_CHUNK, raw(self.binPrivKey32_Encr), width=32)
- binOut.put(BINARY_CHUNK, chk(self.binPrivKey32_Encr), width= 4)
- else:
- binOut.put(BINARY_CHUNK, raw(self.binInitVect16), width=16)
- binOut.put(BINARY_CHUNK, chk(self.binInitVect16), width= 4)
- binOut.put(BINARY_CHUNK, raw(self.binPrivKey32_Plain), width=32)
- binOut.put(BINARY_CHUNK, chk(self.binPrivKey32_Plain), width= 4)
-
- binOut.put(BINARY_CHUNK, raw(self.binPublicKey65), width=65)
- binOut.put(BINARY_CHUNK, chk(self.binPublicKey65), width= 4)
-
- binOut.put(UINT64, self.timeRange[0])
- binOut.put(UINT64, self.timeRange[1])
- binOut.put(UINT32, self.blkRange[0])
- binOut.put(UINT32, self.blkRange[1])
-
- return binOut.getBinaryString()
-
- #############################################################################
- def scanBlockchainForAddress(self, abortIfBDMBusy=False):
- """
- This method will return null output if the BDM is currently in the
- middle of a scan. You can use waitAsLongAsNecessary=True if you
- want to wait for the previous scan AND the next scan. Otherwise,
- you can check for bal==-1 and then try again later...
-
- This is particularly relevant if you know that an address has already
- been scanned, and you expect this method to return immediately. Thus,
- you don't want to wait for any scan at all...
-
- This one-stop-shop method has to be blocking. You might want to
- register the address and rescan asynchronously, skipping this method
- entirely:
-
- cppWlt = Cpp.BtcWallet()
- cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
- TheBDM.registerScrAddr(Hash160ToScrAddr(self.getAddr160()))
- TheBDM.rescanBlockchain(wait=False)
-
- <... do some other stuff ...>
-
- if TheBDM.getBDMState()=='BlockchainReady':
- TheBDM.updateWalletsAfterScan(wait=True) # fast after a rescan
- bal = cppWlt.getBalance('Spendable')
- utxoList = cppWlt.getUnspentTxOutList()
- else:
- <...come back later...>
-
- """
- if TheBDM.getBDMState()=='BlockchainReady' or \
- (TheBDM.isScanning() and not abortIfBDMBusy):
- LOGDEBUG('Scanning blockchain for address')
-
- # We are expecting this method to return balance
- # and UTXO data, so we must make sure we're blocking.
- cppWlt = Cpp.BtcWallet()
- cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
- TheBDM.registerWallet(cppWlt, wait=True)
- TheBDM.scanBlockchainForTx(cppWlt, wait=True)
-
- utxoList = cppWlt.getUnspentTxOutList()
- bal = cppWlt.getSpendableBalance()
- return (bal, utxoList)
- else:
- return (-1, [])
-
- #############################################################################
- def unserialize(self, toUnpack):
- """
- We reconstruct the address from a serialized version of it. See the help
- text for "serialize()" for information on what fields need to
- be included and the binary mapping
-
- We verify all checksums, correct for one byte errors, and raise exceptions
- for bigger problems that can't be fixed.
- """
- if isinstance(toUnpack, BinaryUnpacker):
- serializedData = toUnpack
- else:
- serializedData = BinaryUnpacker( toUnpack )
-
-
- def chkzero(a):
- """
- Due to fixed-width fields, we will get lots of zero-bytes
- even when the binary data container was empty
- """
- if a.count('\x00')==len(a):
- return ''
- else:
- return a
-
-
- # Start with a fresh new address
- self.__init__()
-
- self.addrStr20 = serializedData.get(BINARY_CHUNK, 20)
- chkAddr20 = serializedData.get(BINARY_CHUNK, 4)
-
- addrVerInt = serializedData.get(UINT32)
- flags = serializedData.get(UINT64)
- self.addrStr20 = verifyChecksum(self.addrStr20, chkAddr20)
- flags = int_to_bitset(flags, widthBytes=8)
-
- # Interpret the flags
- containsPrivKey = (flags[0]=='1')
- containsPubKey = (flags[1]=='1')
- self.useEncryption = (flags[2]=='1')
- self.createPrivKeyNextUnlock = (flags[3]=='1')
-
- addrChkError = False
- if len(self.addrStr20)==0:
- addrChkError = True
- if not containsPrivKey and not containsPubKey:
- raise UnserializeError, 'Checksum mismatch in addrStr'
-
-
-
- # Write out address-chaining parameters (for deterministic wallets)
- self.chaincode = chkzero(serializedData.get(BINARY_CHUNK, 32))
- chkChaincode = serializedData.get(BINARY_CHUNK, 4)
- self.chainIndex = serializedData.get(INT64)
- depth = serializedData.get(INT64)
- self.createPrivKeyNextUnlock_ChainDepth = depth
-
- # Correct errors, convert to secure container
- self.chaincode = SecureBinaryData(verifyChecksum(self.chaincode, chkChaincode))
-
-
- # Write out whatever is appropriate for private-key data
- # Binary-unpacker will write all 0x00 bytes if empty values are given
- iv = chkzero(serializedData.get(BINARY_CHUNK, 16))
- chkIv = serializedData.get(BINARY_CHUNK, 4)
- privKey = chkzero(serializedData.get(BINARY_CHUNK, 32))
- chkPriv = serializedData.get(BINARY_CHUNK, 4)
- iv = SecureBinaryData(verifyChecksum(iv, chkIv))
- privKey = SecureBinaryData(verifyChecksum(privKey, chkPriv))
-
- # If this is SUPPOSED to contain a private key...
- if containsPrivKey:
- if privKey.getSize()==0:
- raise UnserializeError, 'Checksum mismatch in PrivateKey '+\
- '('+hash160_to_addrStr(self.addrStr20)+')'
-
- if self.useEncryption:
- if iv.getSize()==0:
- raise UnserializeError, 'Checksum mismatch in IV ' +\
- '('+hash160_to_addrStr(self.addrStr20)+')'
- if self.createPrivKeyNextUnlock:
- self.createPrivKeyNextUnlock_IVandKey[0] = iv.copy()
- self.createPrivKeyNextUnlock_IVandKey[1] = privKey.copy()
- else:
- self.binInitVect16 = iv.copy()
- self.binPrivKey32_Encr = privKey.copy()
- else:
- self.binInitVect16 = iv.copy()
- self.binPrivKey32_Plain = privKey.copy()
-
- pubKey = chkzero(serializedData.get(BINARY_CHUNK, 65))
- chkPub = serializedData.get(BINARY_CHUNK, 4)
- pubKey = SecureBinaryData(verifyChecksum(pubKey, chkPub))
-
- if containsPubKey:
- if not pubKey.getSize()==65:
- if self.binPrivKey32_Plain.getSize()==32:
- pubKey = CryptoAES().ComputePublicKey(self.binPrivKey32_Plain)
- else:
- raise UnserializeError, 'Checksum mismatch in PublicKey ' +\
- '('+hash160_to_addrStr(self.addrStr20)+')'
-
- self.binPublicKey65 = pubKey
-
- if addrChkError:
- self.addrStr20 = self.binPublicKey65.getHash160()
-
- self.timeRange[0] = serializedData.get(UINT64)
- self.timeRange[1] = serializedData.get(UINT64)
- self.blkRange[0] = serializedData.get(UINT32)
- self.blkRange[1] = serializedData.get(UINT32)
-
- self.isInitialized = True
- return self
-
- #############################################################################
- # The following methods are the SIMPLE address operations that can be used
- # to juggle address data without worrying at all about encryption details.
- # The addresses created here can later be endowed with encryption.
- #############################################################################
- def createFromPrivateKey(self, privKey, pubKey=None, skipCheck=False):
- """
- Creates address from a user-supplied random INTEGER.
- This method DOES perform elliptic-curve operations
- """
- if isinstance(privKey, str) and len(privKey)==32:
- self.binPrivKey32_Plain = SecureBinaryData(privKey)
- elif isinstance(privKey, int) or isinstance(privKey, long):
- binPriv = int_to_binary(privKey, widthBytes=32, endOut=BIGENDIAN)
- self.binPrivKey32_Plain = SecureBinaryData(binPriv)
- else:
- raise KeyDataError, 'Unknown private key format'
-
- if pubKey==None:
- self.binPublicKey65 = CryptoECDSA().ComputePublicKey(self.binPrivKey32_Plain)
- else:
- self.binPublicKey65 = SecureBinaryData(pubKey)
-
- if not skipCheck:
- assert(CryptoECDSA().CheckPubPrivKeyMatch( \
- self.binPrivKey32_Plain, \
- self.binPublicKey65))
-
- self.addrStr20 = self.binPublicKey65.getHash160()
-
- self.isInitialized = True
- return self
-
- def createFromPublicKey(self, pubkey):
- """
- Creates address from a user-supplied ECDSA public key.
-
- The key can be supplied as an (x,y) pair of integers, an EC_Point
- as defined in the lisecdsa class, or as a 65-byte binary string
- (the 64 public key bytes with a 0x04 prefix byte)
-
- This method will fail if the supplied pair of points is not
- on the secp256k1 curve.
- """
- if isinstance(pubkey, tuple) and len(pubkey)==2:
- # We are given public-key (x,y) pair
- binXBE = int_to_binary(pubkey[0], widthBytes=32, endOut=BIGENDIAN)
- binYBE = int_to_binary(pubkey[1], widthBytes=32, endOut=BIGENDIAN)
- self.binPublicKey65 = SecureBinaryData('\x04' + binXBE + binYBE)
- if not CryptoECDSA().VerifyPublicKeyValid(self.binPublicKey65):
- raise KeyDataError, 'Supplied public key is not on secp256k1 curve'
- elif isinstance(pubkey, str) and len(pubkey)==65:
- self.binPublicKey65 = SecureBinaryData(pubkey)
- if not CryptoECDSA().VerifyPublicKeyValid(self.binPublicKey65):
- raise KeyDataError, 'Supplied public key is not on secp256k1 curve'
- else:
- raise KeyDataError, 'Unknown public key format!'
-
- # TODO: I should do a test to see which is faster:
- # 1) Compute the hash directly like this
- # 2) Get the string, hash it in python
- self.addrStr20 = self.binPublicKey65.getHash160()
- self.isInitialized = True
- return self
-
-
- def createFromPublicKeyHash160(self, pubkeyHash160, netbyte=ADDRBYTE):
- """
- Creates an address from just the 20-byte binary hash of a public key.
-
- In binary form without a chksum, there is no protection against byte
- errors, since there's no way to distinguish an invalid address from
- a valid one (they both look like random data).
-
- If you are creating an address using 20 bytes you obtained in an
- unreliable manner (such as manually typing them in), you should
- double-check the input before sending money using the address created
- here -- the tx will appear valid and be accepted by the network,
- but will be permanently tied up in the network
- """
- self.__init__()
- self.addrStr20 = pubkeyHash160
- self.isInitialized = True
- return self
-
- def createFromAddrStr(self, addrStr):
- """
- Creates an address from a Base58 address string. Since the address
- string includes a checksum, this method will fail if there was any
- errors entering/copying the address
- """
- self.__init__()
- self.addrStr = addrStr
- if not self.checkAddressValid():
- raise BadAddressError, 'Invalid address string: '+addrStr
- self.isInitialized = True
- return self
-
- def calculateAddrStr(self, netbyte=ADDRBYTE):
- """
- Forces a recalculation of the address string from the public key
- """
- if not self.hasPubKey():
- raise KeyDataError, 'Cannot compute address without PublicKey'
- keyHash = self.binPublicKey65.getHash160()
- chksum = hash256(netbyte + keyHash)[:4]
- return binary_to_base58(netbyte + keyHash + chksum)
-
-
-
- def checkAddressValid(self):
- return checkAddrStrValid(self.addrStr);
-
-
- def pprint(self, withPrivKey=True, indent=''):
- def pp(x, nchar=1000):
- if x.getSize()==0:
- return '--'*32
- else:
- return x.toHexStr()[:nchar]
- print indent + 'BTC Address :', self.getAddrStr()
- print indent + 'Hash160[BE] :', binary_to_hex(self.getAddr160())
- print indent + 'Wallet Location :', self.walletByteLoc
- print indent + 'Chained Address :', self.chainIndex >= -1
- print indent + 'Have (priv,pub) : (%s,%s)' % \
- (str(self.hasPrivKey()), str(self.hasPubKey()))
- print indent + 'First/Last Time : (%s,%s)' % \
- (str(self.timeRange[0]), str(self.timeRange[1]))
- print indent + 'First/Last Block : (%s,%s)' % \
- (str(self.blkRange[0]), str(self.blkRange[1]))
- if self.hasPubKey():
- print indent + 'PubKeyX(BE) :', \
- binary_to_hex(self.binPublicKey65.toBinStr()[1:33 ])
- print indent + 'PubKeyY(BE) :', \
- binary_to_hex(self.binPublicKey65.toBinStr()[ 33:])
- print indent + 'Encryption parameters:'
- print indent + ' UseEncryption :', self.useEncryption
- print indent + ' IsLocked :', self.isLocked
- print indent + ' KeyChanged :', self.keyChanged
- print indent + ' ChainIndex :', self.chainIndex
- print indent + ' Chaincode :', pp(self.chaincode)
- print indent + ' InitVector :', pp(self.binInitVect16)
- if withPrivKey and self.hasPrivKey():
- print indent + 'PrivKeyPlain(BE) :', pp(self.binPrivKey32_Plain)
- print indent + 'PrivKeyCiphr(BE) :', pp(self.binPrivKey32_Encr)
- else:
- print indent + 'PrivKeyPlain(BE) :', pp(SecureBinaryData())
- print indent + 'PrivKeyCiphr(BE) :', pp(SecureBinaryData())
- if self.createPrivKeyNextUnlock:
- print indent + ' ***** :', 'PrivKeys available on next unlock'
-
-
-#############################################################################
-def calcWalletIDFromRoot(root, chain):
- """ Helper method for computing a wallet ID """
- root = PyBtcAddress().createFromPlainKeyData(SecureBinaryData(root))
- root.chaincode = SecureBinaryData(chain)
- first = root.extendAddressChain()
- return binary_to_base58((ADDRBYTE + first.getAddr160()[:5])[::-1])
-
-
-
-################################################################################
-# Identify all the codes/strings that are needed for dealing with scripts
-################################################################################
-
-# Start list of OP codes
-OP_0 = 0
-OP_FALSE = 0
-OP_PUSHDATA1 = 76
-OP_PUSHDATA2 = 77
-OP_PUSHDATA4 = 78
-OP_1NEGATE = 79
-OP_1 = 81
-OP_TRUE = 81
-OP_2 = 82
-OP_3 = 83
-OP_4 = 84
-OP_5 = 85
-OP_6 = 86
-OP_7 = 87
-OP_8 = 88
-OP_9 = 89
-OP_10 = 90
-OP_11 = 91
-OP_12 = 92
-OP_13 = 93
-OP_14 = 94
-OP_15 = 95
-OP_16 = 96
-OP_NOP = 97
-OP_IF = 99
-OP_NOTIF = 100
-OP_ELSE = 103
-OP_ENDIF = 104
-OP_VERIFY = 105
-OP_RETURN = 106
-OP_TOALTSTACK = 107
-OP_FROMALTSTACK = 108
-OP_IFDUP = 115
-OP_DEPTH = 116
-OP_DROP = 117
-OP_DUP = 118
-OP_NIP = 119
-OP_OVER = 120
-OP_PICK = 121
-OP_ROLL = 122
-OP_ROT = 123
-OP_SWAP = 124
-OP_TUCK = 125
-OP_2DROP = 109
-OP_2DUP = 110
-OP_3DUP = 111
-OP_2OVER = 112
-OP_2ROT = 113
-OP_2SWAP = 114
-OP_CAT = 126
-OP_SUBSTR = 127
-OP_LEFT = 128
-OP_RIGHT = 129
-OP_SIZE = 130
-OP_INVERT = 131
-OP_AND = 132
-OP_OR = 133
-OP_XOR = 134
-OP_EQUAL = 135
-OP_EQUALVERIFY = 136
-OP_1ADD = 139
-OP_1SUB = 140
-OP_2MUL = 141
-OP_2DIV = 142
-OP_NEGATE = 143
-OP_ABS = 144
-OP_NOT = 145
-OP_0NOTEQUAL = 146
-OP_ADD = 147
-OP_SUB = 148
-OP_MUL = 149
-OP_DIV = 150
-OP_MOD = 151
-OP_LSHIFT = 152
-OP_RSHIFT = 153
-OP_BOOLAND = 154
-OP_BOOLOR = 155
-OP_NUMEQUAL = 156
-OP_NUMEQUALVERIFY = 157
-OP_NUMNOTEQUAL = 158
-OP_LESSTHAN = 159
-OP_GREATERTHAN = 160
-OP_LESSTHANOREQUAL = 161
-OP_GREATERTHANOREQUAL = 162
-OP_MIN = 163
-OP_MAX = 164
-OP_WITHIN = 165
-OP_RIPEMD160 = 166
-OP_SHA1 = 167
-OP_SHA256 = 168
-OP_HASH160 = 169
-OP_HASH256 = 170
-OP_CODESEPARATOR = 171
-OP_CHECKSIG = 172
-OP_CHECKSIGVERIFY = 173
-OP_CHECKMULTISIG = 174
-OP_CHECKMULTISIGVERIFY = 175
-
-opnames = ['']*256
-opnames[0] = 'OP_0'
-for i in range(1,76):
- opnames[i] ='OP_PUSHDATA'
-opnames[76] = 'OP_PUSHDATA1'
-opnames[77] = 'OP_PUSHDATA2'
-opnames[78] = 'OP_PUSHDATA4'
-opnames[79] = 'OP_1NEGATE'
-opnames[81] = 'OP_1'
-opnames[81] = 'OP_TRUE'
-for i in range(1,17):
- opnames[80+i] = 'OP_' + str(i)
-opnames[97] = 'OP_NOP'
-opnames[99] = 'OP_IF'
-opnames[100] = 'OP_NOTIF'
-opnames[103] = 'OP_ELSE'
-opnames[104] = 'OP_ENDIF'
-opnames[105] = 'OP_VERIFY'
-opnames[106] = 'OP_RETURN'
-opnames[107] = 'OP_TOALTSTACK'
-opnames[108] = 'OP_FROMALTSTACK'
-opnames[115] = 'OP_IFDUP'
-opnames[116] = 'OP_DEPTH'
-opnames[117] = 'OP_DROP'
-opnames[118] = 'OP_DUP'
-opnames[119] = 'OP_NIP'
-opnames[120] = 'OP_OVER'
-opnames[121] = 'OP_PICK'
-opnames[122] = 'OP_ROLL'
-opnames[123] = 'OP_ROT'
-opnames[124] = 'OP_SWAP'
-opnames[125] = 'OP_TUCK'
-opnames[109] = 'OP_2DROP'
-opnames[110] = 'OP_2DUP'
-opnames[111] = 'OP_3DUP'
-opnames[112] = 'OP_2OVER'
-opnames[113] = 'OP_2ROT'
-opnames[114] = 'OP_2SWAP'
-opnames[126] = 'OP_CAT'
-opnames[127] = 'OP_SUBSTR'
-opnames[128] = 'OP_LEFT'
-opnames[129] = 'OP_RIGHT'
-opnames[130] = 'OP_SIZE'
-opnames[131] = 'OP_INVERT'
-opnames[132] = 'OP_AND'
-opnames[133] = 'OP_OR'
-opnames[134] = 'OP_XOR'
-opnames[135] = 'OP_EQUAL'
-opnames[136] = 'OP_EQUALVERIFY'
-opnames[139] = 'OP_1ADD'
-opnames[140] = 'OP_1SUB'
-opnames[141] = 'OP_2MUL'
-opnames[142] = 'OP_2DIV'
-opnames[143] = 'OP_NEGATE'
-opnames[144] = 'OP_ABS'
-opnames[145] = 'OP_NOT'
-opnames[146] = 'OP_0NOTEQUAL'
-opnames[147] = 'OP_ADD'
-opnames[148] = 'OP_SUB'
-opnames[149] = 'OP_MUL'
-opnames[150] = 'OP_DIV'
-opnames[151] = 'OP_MOD'
-opnames[152] = 'OP_LSHIFT'
-opnames[153] = 'OP_RSHIFT'
-opnames[154] = 'OP_BOOLAND'
-opnames[155] = 'OP_BOOLOR'
-opnames[156] = 'OP_NUMEQUAL'
-opnames[157] = 'OP_NUMEQUALVERIFY'
-opnames[158] = 'OP_NUMNOTEQUAL'
-opnames[159] = 'OP_LESSTHAN'
-opnames[160] = 'OP_GREATERTHAN'
-opnames[161] = 'OP_LESSTHANOREQUAL'
-opnames[162] = 'OP_GREATERTHANOREQUAL'
-opnames[163] = 'OP_MIN'
-opnames[164] = 'OP_MAX'
-opnames[165] = 'OP_WITHIN'
-opnames[166] = 'OP_RIPEMD160'
-opnames[167] = 'OP_SHA1'
-opnames[168] = 'OP_SHA256'
-opnames[169] = 'OP_HASH160'
-opnames[170] = 'OP_HASH256'
-opnames[171] = 'OP_CODESEPARATOR'
-opnames[172] = 'OP_CHECKSIG'
-opnames[173] = 'OP_CHECKSIGVERIFY'
-opnames[174] = 'OP_CHECKMULTISIG'
-opnames[175] = 'OP_CHECKMULTISIGVERIFY'
-
-
-opCodeLookup = {}
-opCodeLookup['OP_FALSE'] = 0
-opCodeLookup['OP_PUSHDATA1'] = 76
-opCodeLookup['OP_PUSHDATA2'] = 77
-opCodeLookup['OP_PUSHDATA4'] = 78
-opCodeLookup['OP_1NEGATE'] = 79
-opCodeLookup['OP_1'] = 81
-for i in range(1,17):
- opCodeLookup['OP_'+str(i)] = 80+i
-opCodeLookup['OP_TRUE'] = 81
-opCodeLookup['OP_NOP'] = 97
-opCodeLookup['OP_IF'] = 99
-opCodeLookup['OP_NOTIF'] = 100
-opCodeLookup['OP_ELSE'] = 103
-opCodeLookup['OP_ENDIF'] = 104
-opCodeLookup['OP_VERIFY'] = 105
-opCodeLookup['OP_RETURN'] = 106
-opCodeLookup['OP_TOALTSTACK'] = 107
-opCodeLookup['OP_FROMALTSTACK'] = 108
-opCodeLookup['OP_IFDUP'] = 115
-opCodeLookup['OP_DEPTH'] = 116
-opCodeLookup['OP_DROP'] = 117
-opCodeLookup['OP_DUP'] = 118
-opCodeLookup['OP_NIP'] = 119
-opCodeLookup['OP_OVER'] = 120
-opCodeLookup['OP_PICK'] = 121
-opCodeLookup['OP_ROLL'] = 122
-opCodeLookup['OP_ROT'] = 123
-opCodeLookup['OP_SWAP'] = 124
-opCodeLookup['OP_TUCK'] = 125
-opCodeLookup['OP_2DROP'] = 109
-opCodeLookup['OP_2DUP'] = 110
-opCodeLookup['OP_3DUP'] = 111
-opCodeLookup['OP_2OVER'] = 112
-opCodeLookup['OP_2ROT'] = 113
-opCodeLookup['OP_2SWAP'] = 114
-opCodeLookup['OP_CAT'] = 126
-opCodeLookup['OP_SUBSTR'] = 127
-opCodeLookup['OP_LEFT'] = 128
-opCodeLookup['OP_RIGHT'] = 129
-opCodeLookup['OP_SIZE'] = 130
-opCodeLookup['OP_INVERT'] = 131
-opCodeLookup['OP_AND'] = 132
-opCodeLookup['OP_OR'] = 133
-opCodeLookup['OP_XOR'] = 134
-opCodeLookup['OP_EQUAL'] = 135
-opCodeLookup['OP_EQUALVERIFY'] = 136
-opCodeLookup['OP_1ADD'] = 139
-opCodeLookup['OP_1SUB'] = 140
-opCodeLookup['OP_2MUL'] = 141
-opCodeLookup['OP_2DIV'] = 142
-opCodeLookup['OP_NEGATE'] = 143
-opCodeLookup['OP_ABS'] = 144
-opCodeLookup['OP_NOT'] = 145
-opCodeLookup['OP_0NOTEQUAL'] = 146
-opCodeLookup['OP_ADD'] = 147
-opCodeLookup['OP_SUB'] = 148
-opCodeLookup['OP_MUL'] = 149
-opCodeLookup['OP_DIV'] = 150
-opCodeLookup['OP_MOD'] = 151
-opCodeLookup['OP_LSHIFT'] = 152
-opCodeLookup['OP_RSHIFT'] = 153
-opCodeLookup['OP_BOOLAND'] = 154
-opCodeLookup['OP_BOOLOR'] = 155
-opCodeLookup['OP_NUMEQUAL'] = 156
-opCodeLookup['OP_NUMEQUALVERIFY'] = 157
-opCodeLookup['OP_NUMNOTEQUAL'] = 158
-opCodeLookup['OP_LESSTHAN'] = 159
-opCodeLookup['OP_GREATERTHAN'] = 160
-opCodeLookup['OP_LESSTHANOREQUAL'] = 161
-opCodeLookup['OP_GREATERTHANOREQUAL'] = 162
-opCodeLookup['OP_MIN'] = 163
-opCodeLookup['OP_MAX'] = 164
-opCodeLookup['OP_WITHIN'] = 165
-opCodeLookup['OP_RIPEMD160'] = 166
-opCodeLookup['OP_SHA1'] = 167
-opCodeLookup['OP_SHA256'] = 168
-opCodeLookup['OP_HASH160'] = 169
-opCodeLookup['OP_HASH256'] = 170
-opCodeLookup['OP_CODESEPARATOR'] = 171
-opCodeLookup['OP_CHECKSIG'] = 172
-opCodeLookup['OP_CHECKSIGVERIFY'] = 173
-opCodeLookup['OP_CHECKMULTISIG'] = 174
-opCodeLookup['OP_CHECKMULTISIGVERIFY'] = 175
-#Word Opcode Description
-#OP_PUBKEYHASH = 253 Represents a public key hashed with OP_HASH160.
-#OP_PUBKEY = 254 Represents a public key compatible with OP_CHECKSIG.
-#OP_INVALIDOPCODE = 255 Matches any opcode that is not yet assigned.
-#[edit] Reserved words
-#Any opcode not assigned is also reserved. Using an unassigned opcode makes the transaction invalid.
-#Word Opcode When used...
-#OP_RESERVED = 80 Transaction is invalid
-#OP_VER = 98 Transaction is invalid
-#OP_VERIF = 101 Transaction is invalid
-#OP_VERNOTIF = 102 Transaction is invalid
-#OP_RESERVED1 = 137 Transaction is invalid
-#OP_RESERVED2 = 138 Transaction is invalid
-#OP_NOP1 = OP_NOP10 176-185 The word is ignored.
-
-
-def getOpCode(name):
- return int_to_binary(opCodeLookup[name], widthBytes=1)
-
-
-TXIN_SCRIPT_STANDARD = 0
-TXIN_SCRIPT_COINBASE = 1
-TXIN_SCRIPT_SPENDCB = 2
-TXIN_SCRIPT_UNSIGNED = 3
-TXIN_SCRIPT_UNKNOWN = 4
-
-TXOUT_SCRIPT_STANDARD = 0
-TXOUT_SCRIPT_COINBASE = 1
-TXOUT_SCRIPT_MULTISIG = 2
-TXOUT_SCRIPT_OP_EVAL = 3
-TXOUT_SCRIPT_UNKNOWN = 4
-
-MULTISIG_1of1 = (1,1)
-MULTISIG_1of2 = (1,2)
-MULTISIG_2oF2 = (2,2)
-MULTISIG_1oF3 = (1,3)
-MULTISIG_2oF3 = (2,3)
-MULTISIG_3oF3 = (3,3)
-MULTISIG_UNKNOWN = (0,0)
-
-TXOUT_TYPE_NAMES = { TXOUT_SCRIPT_STANDARD: 'Standard', \
- TXOUT_SCRIPT_COINBASE: 'Coinbase', \
- TXOUT_SCRIPT_MULTISIG: 'Multi-Signature', \
- TXOUT_SCRIPT_UNKNOWN: '', \
- TXOUT_SCRIPT_OP_EVAL: 'OP-EVAL' }
-TXIN_TYPE_NAMES = { TXIN_SCRIPT_STANDARD: 'Standard', \
- TXIN_SCRIPT_COINBASE: 'Coinbase', \
- TXIN_SCRIPT_SPENDCB: 'Spend-CB', \
- TXIN_SCRIPT_UNSIGNED: 'Unsigned', \
- TXIN_SCRIPT_UNKNOWN: ''}
-
-################################################################################
-def getTxOutMultiSigInfo(binScript):
- """
- Gets the Multi-Sig tx type, as well as all the address-160 strings of
- the keys that are needed to satisfy this transaction. This currently
- only identifies M-of-N transaction types, returning unknown otherwise.
-
- However, the address list it returns should be valid regardless of
- whether the type was unknown: we assume all 20-byte chunks of data
- are public key hashes, and 65-byte chunks are public keys.
-
- NOTE: Because the address list is always valid, there is no reason
- not to use this method to extract addresses from ANY scripts,
- not just multi-sig...
- """
- addr160List = []
- pub65List = []
- bup = BinaryUnpacker(binScript)
- opcodes = []
- while bup.getRemainingSize() > 0:
- nextByte = bup.get(UINT8)
- binChunk = ''
- if 0 < nextByte < 76:
- nBytes = nextByte
- binChunk = bup.get(BINARY_CHUNK, nBytes)
- elif nextByte == OP_PUSHDATA1:
- nBytes = scriptUnpacker.get(UINT8)
- binChunk = bup.get(BINARY_CHUNK, nBytes)
- elif nextByte == OP_PUSHDATA2:
- nBytes = scriptUnpacker.get(UINT16)
- binChunk = bup.get(BINARY_CHUNK, nBytes)
- elif nextByte == OP_PUSHDATA4:
- nBytes = scriptUnpacker.get(UINT32)
- binChunk = bup.get(BINARY_CHUNK, nBytes)
- else:
- opcodes.append(nextByte)
-
-
- if len(binChunk) == 20:
- addr160List.append(binChunk)
- pub65List.append('')
- opcodes.append('')
- elif len(binChunk) == 65:
- addr160List.append(convertKeyDataToAddress(pubKey=binChunk))
- pub65List.append(binChunk)
- opcodes.append('')
-
-
- mstype = MULTISIG_UNKNOWN
- #print 'Transaction:',
- #for op in opcodes:
- #print op,
-
- # First assume that this is an M-of-N script
- try:
- isCMS = opcodes[-1]==getOpCode('OP_CHECKMULTISIG')
- M = int(opcodes[ 0])
- N = int(opcodes[-2])
- keys = opcodes[1:-2]
- nPub = sum([(1 if p=='PubKey65' else 0) for p in keys])
- if 00:
- print indstr + indent + 'Sender: ', hash160_to_addrStr(inAddr160)
- print indstr + indent + 'Seq: ', self.intSeq
-
- # Before broadcasting a transaction make sure that the script is canonical
- # This TX could have been signed by an older version of the software.
- # Either on the offline Armory installation which may not have been upgraded
- # or on a previous installation of Armory on this computer.
- def minimizeDERSignaturePadding(self):
- rsLen = binary_to_int(self.binScript[2:3])
- rLen = binary_to_int(self.binScript[4:5])
- rBin = self.binScript[5:5+rLen]
- sLen = binary_to_int(self.binScript[6+rLen:7+rLen])
- sBin = self.binScript[7+rLen:7+rLen+sLen]
- sigScript = createSigScript(rBin, sBin)
- newBinScript = int_to_binary(len(sigScript)+1) + sigScript + self.binScript[3+rsLen:]
- paddingRemoved = newBinScript != self.binScript
- newTxIn = self.copy()
- newTxIn.binScript = newBinScript
- return paddingRemoved, newTxIn
-
-#####
-class PyTxOut(object):
- def __init__(self):
- self.value = UNINITIALIZED
- self.binScript = UNINITIALIZED
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- txOutData = toUnpack
- else:
- txOutData = BinaryUnpacker( toUnpack )
-
- self.value = txOutData.get(UINT64)
- scriptSize = txOutData.get(VAR_INT)
- if txOutData.getRemainingSize() < scriptSize: raise UnserializeError
- self.binScript = txOutData.get(BINARY_CHUNK, scriptSize)
- return self
-
- def getValue(self):
- return self.value
-
- def getScript(self):
- return self.binScript
-
- def serialize(self):
- binOut = BinaryPacker()
- binOut.put(UINT64, self.value)
- binOut.put(VAR_INT, len(self.binScript))
- binOut.put(BINARY_CHUNK, self.binScript)
- return binOut.getBinaryString()
-
- def copy(self):
- return PyTxOut().unserialize(self.serialize())
-
- def fromCpp(self, cppTxOut):
- return self.unserialize(cppTxOut.serialize())
-
- def createCpp(self):
- """ Convert a raw PyTxOut with no context, to a C++ TxOut """
- cppout = Cpp.TxOut()
- cppout.unserialize_swigsafe_(self.serialize())
- return cppout
-
- def pprint(self, nIndent=0, endian=BIGENDIAN):
- indstr = indent*nIndent
- print indstr + 'TxOut:'
- print indstr + indent + 'Value: ', self.value, '(', float(self.value) / ONE_BTC, ')'
- txoutType = getTxOutScriptType(self.binScript)
- if txoutType == TXOUT_SCRIPT_COINBASE:
- print indstr + indent + 'Script: PubKey(%s) OP_CHECKSIG' % \
- (TxOutScriptExtractAddrStr(self.binScript),)
- elif txoutType == TXOUT_SCRIPT_STANDARD:
- print indstr + indent + 'Script: OP_DUP OP_HASH (%s) OP_EQUAL OP_CHECKSIG' % \
- (TxOutScriptExtractAddrStr(self.binScript),)
- else:
- print indstr + indent + 'Script: '
-
-#####
-class PyTx(object):
- def __init__(self):
- self.version = UNINITIALIZED
- self.inputs = UNINITIALIZED
- self.outputs = UNINITIALIZED
- self.lockTime = 0
- self.thisHash = UNINITIALIZED
- self.isSigned = False
-
- def serialize(self):
- binOut = BinaryPacker()
- binOut.put(UINT32, self.version)
- binOut.put(VAR_INT, len(self.inputs))
- for txin in self.inputs:
- binOut.put(BINARY_CHUNK, txin.serialize())
- binOut.put(VAR_INT, len(self.outputs))
- for txout in self.outputs:
- binOut.put(BINARY_CHUNK, txout.serialize())
- binOut.put(UINT32, self.lockTime)
- return binOut.getBinaryString()
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- txData = toUnpack
- else:
- txData = BinaryUnpacker( toUnpack )
-
- startPos = txData.getPosition()
- self.inputs = []
- self.outputs = []
- self.version = txData.get(UINT32)
- numInputs = txData.get(VAR_INT)
- for i in xrange(numInputs):
- self.inputs.append( PyTxIn().unserialize(txData) )
- numOutputs = txData.get(VAR_INT)
- for i in xrange(numOutputs):
- self.outputs.append( PyTxOut().unserialize(txData) )
- self.lockTime = txData.get(UINT32)
- endPos = txData.getPosition()
- self.nBytes = endPos - startPos
- self.thisHash = hash256(self.serialize())
- return self
-
- def copy(self):
- return PyTx().unserialize(self.serialize())
-
- # Before broadcasting a transaction make sure that the script is canonical
- # This TX could have been signed by an older version of the software.
- # Either on the offline Armory installation which may not have been upgraded
- # or on a previous installation of Armory on this computer.
- def minimizeDERSignaturePadding(self):
- paddingRemoved = False
- newTx = self.copy()
- newTx.inputs = []
- for txIn in self.inputs:
- paddingRemovedFromTxIn, newTxIn = txIn.minimizeDERSignaturePadding()
- if paddingRemovedFromTxIn:
- paddingRemoved = True
- newTx.inputs.append(newTxIn)
- else:
- newTx.inputs.append(txIn)
- return paddingRemoved, newTx.copy()
-
- def getHash(self):
- return hash256(self.serialize())
-
- def getHashHex(self, endianness=LITTLEENDIAN):
- return binary_to_hex(self.getHash(), endOut=endianness)
-
- def makeRecipientsList(self):
- """
- Make a list of lists, each one containing information about
- an output in this tx. Usually contains
- [ScriptType, Value, Addr160]
- May include more information if any of the scripts are multi-sig,
- such as public keys and multi-sig type (M-of-N)
- """
- recipInfoList = []
- for txout in self.outputs:
- recipInfoList.append([])
-
- scrType = getTxOutScriptType(txout.binScript)
- recipInfoList[-1].append(scrType)
- recipInfoList[-1].append(txout.value)
- if scrType in (TXOUT_SCRIPT_STANDARD, TXOUT_SCRIPT_COINBASE):
- recipInfoList[-1].append(TxOutScriptExtractAddr160(txout.binScript))
- elif scrType in (TXOUT_SCRIPT_MULTISIG,):
- mstype, addr160s, pubs = getTxOutMultiSigInfo(txout.binScript)
- recipInfoList[-1].append(addr160s)
- recipInfoList[-1].append(pubs)
- recipInfoList[-1].append(mstype[0]) # this is M (from M-of-N)
- elif scrType in (TXOUT_SCRIPT_OP_EVAL,):
- LOGERROR('OP_EVAL doesn\'t exist anymore. How did we get here?')
- recipInfoList[-1].append(txout.binScript)
- elif scrType in (TXOUT_SCRIPT_UNKNOWN,):
- LOGERROR('Unknown TxOut type')
- recipInfoList[-1].append(txout.binScript)
- else:
- LOGERROR('Unrecognized txout script that isn\'t TXOUT_SCRIPT_UNKNOWN...?')
- return recipInfoList
-
-
- def pprint(self, nIndent=0, endian=BIGENDIAN):
- indstr = indent*nIndent
- print indstr + 'Transaction:'
- print indstr + indent + 'TxHash: ', self.getHashHex(endian), \
- '(BE)' if endian==BIGENDIAN else '(LE)'
- print indstr + indent + 'Version: ', self.version
- print indstr + indent + 'nInputs: ', len(self.inputs)
- print indstr + indent + 'nOutputs: ', len(self.outputs)
- print indstr + indent + 'LockTime: ', self.lockTime
- print indstr + indent + 'Inputs: '
- for inp in self.inputs:
- inp.pprint(nIndent+2, endian=endian)
- print indstr + indent + 'Outputs: '
- for out in self.outputs:
- out.pprint(nIndent+2, endian=endian)
-
-
-
- #def pprintShort(self, nIndent=0, endian=BIGENDIAN):
- #print '\nTransaction: %s' % self.getHashHex()
-
-
- def fromCpp(self, cppTx):
- return self.unserialize(cppTx.serialize())
-
- def createCpp(self):
- """ Convert a raw PyTx with no context, to a C++ Tx """
- cpptx = Cpp.Tx()
- cpptx.unserialize_swigsafe_(self.serialize())
- return cpptx
-
- def fetchCpp(self):
- """ Use the info in this PyTx to get the C++ version from TheBDM """
- return TheBDM.getTxByHash(self.getHash())
-
- def pprintHex(self, nIndent=0):
- bu = BinaryUnpacker(self.serialize())
- theSer = self.serialize()
- print binary_to_hex(bu.get(BINARY_CHUNK, 4))
- nTxin = bu.get(VAR_INT)
- print 'VAR_INT(%d)' % nTxin
- for i in range(nTxin):
- print binary_to_hex(bu.get(BINARY_CHUNK,32))
- print binary_to_hex(bu.get(BINARY_CHUNK,4))
- scriptSz = bu.get(VAR_INT)
- print 'VAR_IN(%d)' % scriptSz
- print binary_to_hex(bu.get(BINARY_CHUNK,scriptSz))
- print binary_to_hex(bu.get(BINARY_CHUNK,4))
- nTxout = bu.get(VAR_INT)
- print 'VAR_INT(%d)' % nTxout
- for i in range(nTxout):
- print binary_to_hex(bu.get(BINARY_CHUNK,8))
- scriptSz = bu.get(VAR_INT)
- print binary_to_hex(bu.get(BINARY_CHUNK,scriptSz))
- print binary_to_hex(bu.get(BINARY_CHUNK, 4))
-
-
-
-
-################################################################################
-# Block Information
-################################################################################
-
-
-class PyBlockHeader(object):
- def __init__(self):
- self.version = 1
- self.prevBlkHash = ''
- self.merkleRoot = UNINITIALIZED
- self.timestamp = UNINITIALIZED
- self.diffBits = UNINITIALIZED
- self.nonce = UNINITIALIZED
- # Use these fields for storage of block information, but are not otherwise
- # part of the serialized data structure
- self.theHash = ''
- self.numTx = UNINITIALIZED
- self.blkHeight = UNINITIALIZED
- self.fileByteLoc = UNINITIALIZED
- self.nextBlkHash = UNINITIALIZED
- self.intDifficult = UNINITIALIZED
- self.sumDifficult = UNINITIALIZED
- self.isMainChain = False
- self.isOrphan = True
-
- def serialize(self):
- if self.version == UNINITIALIZED:
- raise UnitializedBlockDataError, 'PyBlockHeader object not initialized!'
- binOut = BinaryPacker()
- binOut.put(UINT32, self.version)
- binOut.put(BINARY_CHUNK, self.prevBlkHash)
- binOut.put(BINARY_CHUNK, self.merkleRoot)
- binOut.put(UINT32, self.timestamp)
- binOut.put(BINARY_CHUNK, self.diffBits)
- binOut.put(UINT32, self.nonce)
- return binOut.getBinaryString()
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- blkData = toUnpack
- else:
- blkData = BinaryUnpacker( toUnpack )
-
- self.version = blkData.get(UINT32)
- self.prevBlkHash = blkData.get(BINARY_CHUNK, 32)
- self.merkleRoot = blkData.get(BINARY_CHUNK, 32)
- self.timestamp = blkData.get(UINT32)
- self.diffBits = blkData.get(BINARY_CHUNK, 4)
- self.nonce = blkData.get(UINT32)
- self.theHash = hash256(self.serialize())
- return self
-
- def copy(self):
- return PyBlockHeader().unserialize(self.serialize())
-
- def getHash(self, endian=LITTLEENDIAN):
- if self.version == UNINITIALIZED:
- raise UnitializedBlockDataError, 'PyBlockHeader object not initialized!'
- if len(self.theHash) < 32:
- self.theHash = hash256(self.serialize())
- outHash = self.theHash
- if endian==BIGENDIAN:
- outHash = binary_switchEndian(outHash)
- return outHash
-
- def getHashHex(self, endian=LITTLEENDIAN):
- if self.version == UNINITIALIZED:
- raise UnitializedBlockDataError, 'PyBlockHeader object not initialized!'
- if len(self.theHash) < 32:
- self.theHash = hash256(self.serialize())
- return binary_to_hex(self.theHash, endian)
-
- def getDifficulty(self):
- if self.diffBits == UNINITIALIZED:
- raise UnitializedBlockDataError, 'PyBlockHeader object not initialized!'
- self.intDifficult = binaryBits_to_difficulty(self.diffBits)
- return self.intDifficult
-
- def fromCpp(self, cppHead):
- return self.unserialize(cppHead.serialize())
-
- def createCpp(self):
- """ Convert a raw blockheader with no context, to a C++ BlockHeader """
- cppbh = Cpp.BlockHeader()
- cppbh.unserialize_swigsafe_(self.serialize())
- return cppbh
-
- def fetchCpp(self):
- """ Convert a raw blockheader with no context, to a C++ BlockHeader """
- return TheBDM.getHeaderByHash(self.getHash())
-
-
- def pprint(self, nIndent=0, endian=BIGENDIAN):
- indstr = indent*nIndent
- print indstr + 'BlockHeader:'
- print indstr + indent + 'Version: ', self.version
- print indstr + indent + 'ThisHash: ', binary_to_hex( self.theHash, endOut=endian), \
- '(BE)' if endian==BIGENDIAN else '(LE)'
- print indstr + indent + 'PrevBlock: ', binary_to_hex(self.prevBlkHash, endOut=endian), \
- '(BE)' if endian==BIGENDIAN else '(LE)'
- print indstr + indent + 'MerkRoot: ', binary_to_hex(self.merkleRoot, endOut=endian), \
- '(BE)' if endian==BIGENDIAN else '(LE)'
- print indstr + indent + 'Timestamp: ', self.timestamp
- fltDiff = binaryBits_to_difficulty(self.diffBits)
- print indstr + indent + 'Difficulty:', fltDiff, '('+binary_to_hex(self.diffBits)+')'
- print indstr + indent + 'Nonce: ', self.nonce
- if not self.blkHeight==UNINITIALIZED:
- print indstr + indent + 'BlkHeight: ', self.blkHeight
- if not self.blkHeight==UNINITIALIZED:
- print indstr + indent + 'BlkFileLoc:', self.fileByteLoc
- if not self.nextBlkHash==UNINITIALIZED:
- #print indstr + indent + 'NextBlock: ', binary_to_hex(self.nextBlkHash)
- print indstr + indent + 'NextBlock: ', self.nextBlkHash
- if not self.numTx==UNINITIALIZED:
- print indstr + indent + 'NumTx: ', self.numTx
- if not self.intDifficult==UNINITIALIZED:
- print indstr + indent + 'Difficulty:', self.intDifficult
- if not self.sumDifficult==UNINITIALIZED:
- print indstr + indent + 'DiffSum: ', self.sumDifficult
-
-
-################################################################################
-################################################################################
-class PyBlockData(object):
- def __init__(self, txList=[]):
- self.txList = txList
- self.numTx = len(txList)
- self.merkleTree = []
- self.merkleRoot = UNINITIALIZED
-
-
- def serialize(self):
- if self.numTx == UNINITIALIZED:
- raise UnitializedBlockDataError, 'PyBlockData object not initialized!'
- binOut = BinaryPacker()
- binOut.put(VAR_INT, self.numTx)
- for tx in self.txList:
- binOut.put(BINARY_CHUNK, tx.serialize())
- return binOut.getBinaryString()
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- blkData = toUnpack
- else:
- blkData = BinaryUnpacker( toUnpack )
-
- self.txList = []
- self.numTx = blkData.get(VAR_INT)
- for i in xrange(self.numTx):
- self.txList.append( PyTx().unserialize(blkData) )
- self.merkleTree = []
- self.merkleRoot = ''
- return self
-
-
- def getTxHashList(self):
- if( self.numTx == UNINITIALIZED ):
- self.getMerkleRoot()
- return self.merkleTree[:self.numTx]
-
-
- def getMerkleRoot(self):
- assert( not self.numTx == UNINITIALIZED )
- if len(self.merkleTree)==0 and not self.numTx==0:
- #Create the merkle tree
- self.merkleTree = [hash256(tx.serialize()) for tx in self.txList]
- sz = len(self.merkleTree)
- while sz > 1:
- hashes = self.merkleTree[-sz:]
- mod2 = sz%2
- for i in range(sz/2):
- self.merkleTree.append( hash256(hashes[2*i] + hashes[2*i+1]) )
- if mod2==1:
- self.merkleTree.append( hash256(hashes[-1] + hashes[-1]) )
- sz = (sz+1) / 2
- self.merkleRoot = self.merkleTree[-1]
- return self.merkleRoot
-
- def printMerkleTree(self, reverseHash=False, indent=''):
- print indent + 'Printing Merkle Tree:'
- if reverseHash:
- print indent + '(hashes will be reversed, like shown on BlockExplorer.com)'
- root = self.getMerkleRoot()
- print indent + 'Merkle Root:', binary_to_hex(root)
- for h in self.merkleTree:
- phash = binary_to_hex(h) if not reverseHash else binary_to_hex(h, endOut=BIGENDIAN)
- print indent + '\t' + phash
-
-
- def pprint(self, nIndent=0, endian=BIGENDIAN):
- indstr = indent*nIndent
- print indstr + 'BlockData:'
- print indstr + indent + 'MerkleRoot: ', binary_to_hex(self.getMerkleRoot(), endian), \
- '(BE)' if endian==BIGENDIAN else '(LE)'
- print indstr + indent + 'NumTx: ', self.numTx
- for tx in self.txList:
- tx.pprint(nIndent+1, endian=endian)
-
-
-################################################################################
-################################################################################
-class PyBlock(object):
- def __init__(self, prevHeader=None, txlist=[]):
- self.blockHeader = PyBlockHeader()
- self.blockData = PyBlockData()
- if prevHeader:
- self.setPrevHeader(prevHeader)
- if txlist:
- self.setTxList(txlist)
-
- def serialize(self):
- assert( not self.blockHeader == UNINITIALIZED )
- binOut = BinaryPacker()
- binOut.put(BINARY_CHUNK, self.blockHeader.serialize())
- binOut.put(BINARY_CHUNK, self.blockData.serialize())
- return binOut.getBinaryString()
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- blkData = toUnpack
- else:
- blkData = BinaryUnpacker( toUnpack )
-
- self.txList = []
- self.blockHeader = PyBlockHeader().unserialize(blkData)
- self.blockData = PyBlockData().unserialize(blkData)
- return self
-
- def getNumTx(self):
- return len(self.blockData.txList)
-
- def getSize(self):
- return len(self.serialize())
-
- # Not sure how useful these manual block-construction methods
- # are. For now, I just need something with non-ridiculous vals
- def setPrevHeader(self, prevHeader, copyAttr=True):
- self.blockHeader.prevBlkHash = prevHeader.theHash
- self.blockHeader.nonce = 0
- if copyAttr:
- self.blockHeader.version = prevHeader.version
- self.blockHeader.timestamp = prevHeader.timestamp+600
- self.blockHeader.diffBits = prevHeader.diffBits
-
- def setTxList(self, txlist):
- self.blockData = PyBlockData(txlist)
- if not self.blockHeader == UNINITIALIZED:
- self.blockHeader.merkleRoot = self.blockData.getMerkleRoot()
-
- def tx(self, idx):
- return self.blockData.txList[idx]
-
- def pprint(self, nIndent=0, endian=BIGENDIAN):
- indstr = indent*nIndent
- print indstr + 'Block:'
- self.blockHeader.pprint(nIndent+1, endian=endian)
- self.blockData.pprint(nIndent+1, endian=endian)
-
-
-#############################################################################
-def getFeeForTx(txHash):
- if TheBDM.getBDMState()=='BlockchainReady':
- if not TheBDM.hasTxWithHash(txHash):
- LOGERROR('Attempted to get fee for tx we don\'t have...? %s', \
- binary_to_hex(txHash,BIGENDIAN))
- return 0
- txref = TheBDM.getTxByHash(txHash)
- valIn, valOut = 0,0
- for i in range(txref.getNumTxIn()):
- valIn += TheBDM.getSentValue(txref.getTxInCopy(i))
- for i in range(txref.getNumTxOut()):
- valOut += txref.getTxOutCopy(i).getValue()
- return valIn - valOut
-
-
-#############################################################################
-def determineSentToSelfAmt(le, wlt):
- """
- NOTE: this method works ONLY because we always generate a new address
- whenever creating a change-output, which means it must have a
- higher chainIndex than all other addresses. If you did something
- creative with this tx, this may not actually work.
- """
- amt = 0
- if TheBDM.isInitialized() and le.isSentToSelf():
- txref = TheBDM.getTxByHash(le.getTxHash())
- if not txref.isInitialized():
- return (0, 0)
- if txref.getNumTxOut()==1:
- return (txref.getTxOutCopy(0).getValue(), -1)
- maxChainIndex = -5
- txOutChangeVal = 0
- changeIndex = -1
- valSum = 0
- for i in range(txref.getNumTxOut()):
- valSum += txref.getTxOutCopy(i).getValue()
- addr160 = CheckHash160(txref.getTxOutCopy(i).getScrAddressStr())
- addr = wlt.getAddrByHash160(addr160)
- if addr and addr.chainIndex > maxChainIndex:
- maxChainIndex = addr.chainIndex
- txOutChangeVal = txref.getTxOutCopy(i).getValue()
- changeIndex = i
-
- amt = valSum - txOutChangeVal
- return (amt, changeIndex)
-
-
-
-
-################################################################################
-#
-# SCRIPTING!
-#
-################################################################################
-
-
-def convertScriptToOpStrings(binScript):
- opList = []
-
- i = 0;
- sz = len(binScript)
- error = False;
- while i < sz:
- nextOp = ord(binScript[i]);
- if nextOp == 0:
- opList.append("0")
- i+=1
- elif nextOp < 76:
- opList.append("[PUSHDATA -- " + str(nextOp) + " BYTES:]")
- binObj = binScript[i+1:i+1+nextOp]
- opList.append(binary_to_hex(binObj))
- i += nextOp+1
- elif nextOp == 76:
- nb = binary_to_int(binScript[i+1:i+2])
- if i+1+1+nb > sz:
- error = True;
- break
- binObj = binScript[i+2:i+2+nb]
- opList.append("[OP_PUSHDATA1 -- " + str(nb) + " BYTES:]");
- opList.append(binary_to_hex(binObj))
- i += nb+2
- elif nextOp == 77:
- nb = binScript[i+1:i+3];
- if i+1+2+nb > sz:
- error = True;
- break
- nbprint = min(nb,256)
- binObj = binScript[i+3,i+3+nbprint]
- opList.append("[OP_PUSHDATA2 -- " + str(nb) + " BYTES:]");
- opList.append(binary_to_hex(binObj) + '...')
- i += nb+3
- elif nextOp == 78:
- nb = binScript[i+1:i+5];
- if i+1+4+nb > sz:
- error = True;
- break
- nbprint = min(nb,256)
- binObj = binScript[i+5,i+5+nbprint]
- opList.append("[OP_PUSHDATA4 -- " + str(nb) + " BYTES:]");
- opList.append(binary_to_hex(binObj) + '...')
- i += nb+5
- else:
- opList.append(opnames[nextOp]);
- i += 1
-
- if error:
- opList.append("ERROR PROCESSING SCRIPT");
-
- return opList;
-
-
-def pprintScript(binScript, nIndent=0):
- indstr = indent*nIndent
- print indstr + 'Script:'
- opList = convertScriptToOpStrings(binScript)
- for op in opList:
- print indstr + indent + op
-
-
-
-TX_INVALID = 0
-OP_NOT_IMPLEMENTED = 1
-OP_DISABLED = 2
-SCRIPT_STACK_SIZE_ERROR = 3
-SCRIPT_ERROR = 4
-SCRIPT_NO_ERROR = 5
-
-
-class PyScriptProcessor(object):
- """
- Use this class to evaluate a script. This method is more complicated
- than some might expect, due to the fact that any OP_CHECKSIG or
- OP_CHECKMULTISIG code requires the full transaction of the TxIn script
- and also needs the TxOut script being spent. Since nearly every useful
- script will have one of these operations, this class/method assumes
- that all that data will be supplied.
-
- To simply execute a script not requiring any crypto operations:
-
- scriptIsValid = PyScriptProcessor().executeScript(binScript)
- """
-
- def __init__(self, txOldData=None, txNew=None, txInIndex=None):
- self.stack = []
- self.txNew = None
- self.script1 = None
- self.script2 = None
- if txOldData and txNew and not txInIndex==None:
- self.setTxObjects(txOldData, txNew, txInIndex)
-
-
- def setTxObjects(self, txOldData, txNew, txInIndex):
- """
- The minimal amount of data necessary to evaluate a script that
- has an signature check is the TxOut script that is being spent
- and the entire Tx of the TxIn that is spending it. Thus, we
- must supply at least the txOldScript, and a txNew with its
- TxIn index (so we know which TxIn is spending that TxOut).
- It is acceptable to pass in the full TxOut or the tx of the
- TxOut instead of just the script itself.
- """
- self.txNew = PyTx().unserialize(txNew.serialize())
- self.script1 = str(txNew.inputs[txInIndex].binScript) # copy
- self.txInIndex = txInIndex
- self.txOutIndex = txNew.inputs[txInIndex].outpoint.txOutIndex
- self.txHash = txNew.inputs[txInIndex].outpoint.txHash
-
- if isinstance(txOldData, PyTx):
- if not self.txHash == hash256(txOldData.serialize()):
- LOGERROR('*** Supplied incorrect pair of transactions!')
- self.script2 = str(txOldData.outputs[self.txOutIndex].binScript)
- elif isinstance(txOldData, PyTxOut):
- self.script2 = str(txOldData.binScript)
- elif isinstance(txOldData, str):
- self.script2 = str(txOldData)
-
-
-
- def verifyTransactionValid(self, txOldData=None, txNew=None, txInIndex=-1):
- TimerStart('psp.verifyTransactionValid')
- if txOldData and txNew and txInIndex != -1:
- self.setTxObjects(txOldData, txNew, txInIndex)
- else:
- txOldData = self.script2
- txNew = self.txNew
- txInIndex = self.txInIndex
-
- if self.script1==None or self.txNew==None:
- raise VerifyScriptError, 'Cannot verify transactions, without setTxObjects call first!'
-
- # Execute TxIn script first
- self.stack = []
- exitCode1 = self.executeScript(self.script1, self.stack)
-
- if not exitCode1 == SCRIPT_NO_ERROR:
- raise VerifyScriptError, ('First script failed! Exit Code: ' + str(exitCode1))
-
- exitCode2 = self.executeScript(self.script2, self.stack)
-
- if not exitCode2 == SCRIPT_NO_ERROR:
- raise VerifyScriptError, ('Second script failed! Exit Code: ' + str(exitCode2))
-
- TimerStop('psp.verifyTransactionValid')
- return self.stack[-1]==1
-
-
- def executeScript(self, binaryScript, stack=[]):
- self.stack = stack
- self.stackAlt = []
- scriptData = BinaryUnpacker(binaryScript)
- self.lastOpCodeSepPos = None
-
- while scriptData.getRemainingSize() > 0:
- opcode = scriptData.get(UINT8)
- exitCode = self.executeOpCode(opcode, scriptData, self.stack, self.stackAlt)
- if not exitCode == SCRIPT_NO_ERROR:
- if exitCode==OP_NOT_IMPLEMENTED:
- LOGERROR('***ERROR: OpCodes OP_IF, OP_NOTIF, OP_ELSE, OP_ENDIF,')
- LOGERROR(' have not been implemented, yet. This script')
- LOGERROR(' could not be evaluated.')
- if exitCode==OP_DISABLED:
- LOGERROR('***ERROR: This script included an op code that has been')
- LOGERROR(' disabled for security reasons. Script eval')
- LOGERROR(' failed.')
- return exitCode
-
- return SCRIPT_NO_ERROR
-
-
- # Implementing this method exactly as in the client because it looks like
- # there could be some subtleties with how it determines "true"
- def castToBool(self, binData):
- if isinstance(binData, int):
- binData = int_to_binary(binData)
-
- for i,byte in enumerate(binData):
- if not ord(byte) == 0:
- # This looks like it's assuming LE encoding (?)
- if (i == len(binData)-1) and (byte==0x80):
- return False
- return True
- return False
-
-
- def checkSig(self,binSig, binPubKey, txOutScript, txInTx, txInIndex, lastOpCodeSep=None):
- """
- Generic method for checking Bitcoin tx signatures. This needs to be used for both
- OP_CHECKSIG and OP_CHECKMULTISIG. Step 1 is to pop signature and public key off
- the stack, which must be done outside this method and passed in through the argument
- list. The remaining steps do not require access to the stack.
- """
-
- # 2. Subscript is from latest OP_CODESEPARATOR until end... if DNE, use whole script
- subscript = txOutScript
- if lastOpCodeSep:
- subscript = subscript[lastOpCodeSep:]
-
- # 3. Signature is deleted from subscript
- # I'm not sure why this line is necessary - maybe for non-standard scripts?
- lengthInBinary = int_to_binary(len(binSig))
- subscript = subscript.replace( lengthInBinary + binSig, "")
-
- # 4. Hashtype is popped and stored
- hashtype = binary_to_int(binSig[-1])
- justSig = binSig[:-1]
-
- if not hashtype == 1:
- LOGERROR('Non-unity hashtypes not implemented yet! (hashtype = %d)', hashtype)
- assert(False)
-
- # 5. Make a copy of the transaction -- we will be hashing a modified version
- txCopy = PyTx().unserialize( txInTx.serialize() )
-
- # 6. Remove all OP_CODESEPARATORs
- subscript.replace( int_to_binary(OP_CODESEPARATOR), '')
-
- # 7. All the TxIn scripts in the copy are blanked (set to empty string)
- for txin in txCopy.inputs:
- txin.binScript = ''
-
- # 8. Script for the current input in the copy is set to subscript
- txCopy.inputs[txInIndex].binScript = subscript
-
- # 9. Prepare the signature and public key
- senderAddr = PyBtcAddress().createFromPublicKey(binPubKey)
- binHashCode = int_to_binary(hashtype, widthBytes=4)
- toHash = txCopy.serialize() + binHashCode
-
- # Hashes are computed as part of CppBlockUtils::CryptoECDSA methods
- ##hashToVerify = hash256(toHash)
- ##hashToVerify = binary_switchEndian(hashToVerify)
-
- # 10. Apply ECDSA signature verification
- if senderAddr.verifyDERSignature(toHash, justSig):
- return True
- else:
- return False
-
-
-
-
- def executeOpCode(self, opcode, scriptUnpacker, stack, stackAlt):
- """
- Executes the next OP_CODE given the current state of the stack(s)
- """
-
- # TODO: Gavin clarified the effects of OP_0, and OP_1-OP_16.
- # OP_0 puts an empty string onto the stack, which evaluateses to
- # false and is plugged into HASH160 as ''
- # OP_X puts a single byte onto the stack, 0x01 to 0x10
- #
- # I haven't implemented it this way yet, because I'm still missing
- # some details. Since this "works" for available scripts, I'm going
- # to leave it alone for now.
-
- ##########################################################################
- ##########################################################################
- ### This block produces very nice debugging output for script eval!
- #def pr(s):
- #if isinstance(s,int):
- #return str(s)
- #elif isinstance(s,str):
- #if len(s)>8:
- #return binary_to_hex(s)[:8]
- #else:
- #return binary_to_hex(s)
-
- #print ' '.join([pr(i) for i in stack])
- #print opnames[opcode][:12].ljust(12,' ') + ':',
- ##########################################################################
- ##########################################################################
-
-
- stackSizeAtLeast = lambda n: (len(self.stack) >= n)
-
- if opcode == OP_FALSE:
- stack.append(0)
- elif 0 < opcode < 76:
- stack.append(scriptUnpacker.get(BINARY_CHUNK, opcode))
- elif opcode == OP_PUSHDATA1:
- nBytes = scriptUnpacker.get(UINT8)
- stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
- elif opcode == OP_PUSHDATA2:
- nBytes = scriptUnpacker.get(UINT16)
- stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
- elif opcode == OP_PUSHDATA4:
- nBytes = scriptUnpacker.get(UINT32)
- stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
- elif opcode == OP_1NEGATE:
- stack.append(-1)
- elif opcode == OP_TRUE:
- stack.append(1)
- elif 81 < opcode < 97:
- stack.append(opcode-80)
- elif opcode == OP_NOP:
- pass
-
- # TODO: figure out the conditional op codes...
- elif opcode == OP_IF:
- return OP_NOT_IMPLEMENTED
- elif opcode == OP_NOTIF:
- return OP_NOT_IMPLEMENTED
- elif opcode == OP_ELSE:
- return OP_NOT_IMPLEMENTED
- elif opcode == OP_ENDIF:
- return OP_NOT_IMPLEMENTED
-
- elif opcode == OP_VERIFY:
- if not self.castToBool(stack.pop()):
- stack.append(0)
- return TX_INVALID
- elif opcode == OP_RETURN:
- return TX_INVALID
- elif opcode == OP_TOALTSTACK:
- stackAlt.append( stack.pop() )
- elif opcode == OP_FROMALTSTACK:
- stack.append( stackAlt.pop() )
-
- elif opcode == OP_IFDUP:
- # Looks like this method duplicates the top item if it's not zero
- if not stackSizeAtLeast(1): return SCRIPT_STACK_SIZE_ERROR
- if self.castToBool(stack[-1]):
- stack.append(stack[-1]);
-
- elif opcode == OP_DEPTH:
- stack.append( len(stack) )
- elif opcode == OP_DROP:
- stack.pop()
- elif opcode == OP_DUP:
- stack.append( stack[-1] )
- elif opcode == OP_NIP:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- del stack[-2]
- elif opcode == OP_OVER:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- stack.append(stack[-2])
- elif opcode == OP_PICK:
- n = stack.pop()
- if not stackSizeAtLeast(n): return SCRIPT_STACK_SIZE_ERROR
- stack.append(stack[-n])
- elif opcode == OP_ROLL:
- n = stack.pop()
- if not stackSizeAtLeast(n): return SCRIPT_STACK_SIZE_ERROR
- stack.append(stack[-(n+1)])
- del stack[-(n+2)]
- elif opcode == OP_ROT:
- if not stackSizeAtLeast(3): return SCRIPT_STACK_SIZE_ERROR
- stack.append( stack[-3] )
- del stack[-4]
- elif opcode == OP_SWAP:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- x2 = stack.pop()
- x1 = stack.pop()
- stack.extend([x2, x1])
- elif opcode == OP_TUCK:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- x2 = stack.pop()
- x1 = stack.pop()
- stack.extend([x2, x1, x2])
- elif opcode == OP_2DROP:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- stack.pop()
- stack.pop()
- elif opcode == OP_2DUP:
- if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
- stack.append( stack[-2] )
- stack.append( stack[-2] )
- elif opcode == OP_3DUP:
- if not stackSizeAtLeast(3): return SCRIPT_STACK_SIZE_ERROR
- stack.append( stack[-3] )
- stack.append( stack[-3] )
- stack.append( stack[-3] )
- elif opcode == OP_2OVER:
- if not stackSizeAtLeast(4): return SCRIPT_STACK_SIZE_ERROR
- stack.append( stack[-4] )
- stack.append( stack[-4] )
- elif opcode == OP_2ROT:
- if not stackSizeAtLeast(6): return SCRIPT_STACK_SIZE_ERROR
- stack.append( stack[-6] )
- stack.append( stack[-6] )
- elif opcode == OP_2SWAP:
- if not stackSizeAtLeast(4): return SCRIPT_STACK_SIZE_ERROR
- x4 = stack.pop()
- x3 = stack.pop()
- x2 = stack.pop()
- x1 = stack.pop()
- stack.extend( [x3, x4, x1, x2] )
- elif opcode == OP_CAT:
- return OP_DISABLED
- elif opcode == OP_SUBSTR:
- return OP_DISABLED
- elif opcode == OP_LEFT:
- return OP_DISABLED
- elif opcode == OP_RIGHT:
- return OP_DISABLED
- elif opcode == OP_SIZE:
- if isinstance(stack[-1], int):
- stack.append(0)
- else:
- stack.append( len(stack[-1]) )
- elif opcode == OP_INVERT:
- return OP_DISABLED
- elif opcode == OP_AND:
- return OP_DISABLED
- elif opcode == OP_OR:
- return OP_DISABLED
- elif opcode == OP_XOR:
- return OP_DISABLED
- elif opcode == OP_EQUAL:
- x1 = stack.pop()
- x2 = stack.pop()
- stack.append( 1 if x1==x2 else 0 )
- elif opcode == OP_EQUALVERIFY:
- x1 = stack.pop()
- x2 = stack.pop()
- if not x1==x2:
- stack.append(0)
- return TX_INVALID
-
-
- elif opcode == OP_1ADD:
- stack[-1] += 1
- elif opcode == OP_1SUB:
- stack[-1] -= 1
- elif opcode == OP_2MUL:
- stack[-1] *= 2
- return OP_DISABLED
- elif opcode == OP_2DIV:
- stack[-1] /= 2
- return OP_DISABLED
- elif opcode == OP_NEGATE:
- stack[-1] *= -1
- elif opcode == OP_ABS:
- stack[-1] = abs(stack[-1])
- elif opcode == OP_NOT:
- top = stack.pop()
- if top==0:
- stack.append(1)
- else:
- stack.append(0)
- elif opcode == OP_0NOTEQUAL:
- top = stack.pop()
- if top==0:
- stack.append(0)
- else:
- stack.append(1)
- top = stack.pop()
- if top==0:
- stack.append(1)
- else:
- stack.append(0)
- elif opcode == OP_ADD:
- b = stack.pop()
- a = stack.pop()
- stack.append(a+b)
- elif opcode == OP_SUB:
- b = stack.pop()
- a = stack.pop()
- stack.append(a-b)
- elif opcode == OP_MUL:
- return OP_DISABLED
- elif opcode == OP_DIV:
- return OP_DISABLED
- elif opcode == OP_MOD:
- return OP_DISABLED
- elif opcode == OP_LSHIFT:
- return OP_DISABLED
- elif opcode == OP_RSHIFT:
- return OP_DISABLED
- elif opcode == OP_BOOLAND:
- b = stack.pop()
- a = stack.pop()
- if (not a==0) and (not b==0):
- stack.append(1)
- else:
- stack.append(0)
- elif opcode == OP_BOOLOR:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if (self.castToBool(a) or self.castToBool(b)) else 0 )
- elif opcode == OP_NUMEQUAL:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if a==b else 0 )
- elif opcode == OP_NUMEQUALVERIFY:
- b = stack.pop()
- a = stack.pop()
- if not a==b:
- stack.append(0)
- return TX_INVALID
- elif opcode == OP_NUMNOTEQUAL:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if not a==b else 0 )
- elif opcode == OP_LESSTHAN:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if ab else 0)
- elif opcode == OP_LESSTHANOREQUAL:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if a<=b else 0)
- elif opcode == OP_GREATERTHANOREQUAL:
- b = stack.pop()
- a = stack.pop()
- stack.append( 1 if a>=b else 0)
- elif opcode == OP_MIN:
- b = stack.pop()
- a = stack.pop()
- stack.append( min(a,b) )
- elif opcode == OP_MAX:
- b = stack.pop()
- a = stack.pop()
- stack.append( max(a,b) )
- elif opcode == OP_WITHIN:
- xmax = stack.pop()
- xmin = stack.pop()
- x = stack.pop()
- stack.append( 1 if (xmin <= x < xmax) else 0 )
-
- elif opcode == OP_RIPEMD160:
- bits = stack.pop()
- stack.append( ripemd160(bits) )
- elif opcode == OP_SHA1:
- bits = stack.pop()
- stack.append( sha1(bits) )
- elif opcode == OP_SHA256:
- bits = stack.pop()
- stack.append( sha256(bits) )
- elif opcode == OP_HASH160:
- bits = stack.pop()
- if isinstance(bits, int):
- bits = ''
- stack.append( hash160(bits) )
- elif opcode == OP_HASH256:
- bits = stack.pop()
- if isinstance(bits, int):
- bits = ''
- stack.append( sha256(sha256(bits) ) )
- elif opcode == OP_CODESEPARATOR:
- self.lastOpCodeSepPos = scriptUnpacker.getPosition()
- elif opcode == OP_CHECKSIG or opcode == OP_CHECKSIGVERIFY:
-
- # 1. Pop key and sig from the stack
- binPubKey = stack.pop()
- binSig = stack.pop()
-
- # 2-10. encapsulated in sep method so CheckMultiSig can use it too
- txIsValid = self.checkSig( binSig, \
- binPubKey, \
- scriptUnpacker.getBinaryString(), \
- self.txNew, \
- self.txInIndex, \
- self.lastOpCodeSepPos)
- stack.append(1 if txIsValid else 0)
- if opcode==OP_CHECKSIGVERIFY:
- verifyCode = self.executeOpCode(OP_VERIFY)
- if verifyCode == TX_INVALID:
- return TX_INVALID
-
- elif opcode == OP_CHECKMULTISIG or opcode == OP_CHECKMULTISIGVERIFY:
- # OP_CHECKMULTISIG procedure ported directly from Satoshi client code
- # Location: bitcoin-0.4.0-linux/src/src/script.cpp:775
- i=1
- if len(stack) < i:
- return TX_INVALID
-
- nKeys = int(stack[-i])
- if nKeys < 0 or nKeys > 20:
- return TX_INVALID
-
- i += 1
- iKey = i
- i += nKeys
- if len(stack) < i:
- return TX_INVALID
-
- nSigs = int(stack[-i])
- if nSigs < 0 or nSigs > nKeys:
- return TX_INVALID
-
- iSig = i
- i += 1
- i += nSigs
- if len(stack) < i:
- return TX_INVALID
-
- stack.pop()
-
- # Apply the ECDSA verification to each of the supplied Sig-Key-pairs
- enoughSigsMatch = True
- while enoughSigsMatch and nSigs > 0:
- binSig = stack[-iSig]
- binKey = stack[-iKey]
-
- if( self.checkSig(binSig, \
- binKey, \
- scriptUnpacker.getBinaryString(), \
- self.txNew, \
- self.txInIndex, \
- self.lastOpCodeSepPos) ):
- iSig += 1
- nSigs -= 1
-
- iKey +=1
- nKeys -=1
-
- if(nSigs > nKeys):
- enoughSigsMatch = False
-
- # Now pop the things off the stack, we only accessed in-place before
- while i > 1:
- i -= 1
- stack.pop()
-
-
- stack.append(1 if enoughSigsMatch else 0)
-
- if opcode==OP_CHECKMULTISIGVERIFY:
- verifyCode = self.executeOpCode(OP_VERIFY)
- if verifyCode == TX_INVALID:
- return TX_INVALID
-
- else:
- return SCRIPT_ERROR
-
- return SCRIPT_NO_ERROR
-
-
-
-################################################################################
-#def getUnspentTxOutsForAddrList(addr160List, utxoType='Sweep', startBlk=-1, \
-def getUnspentTxOutsForAddr160List(addr160List, utxoType='Sweep', startBlk=-1, \
- abortIfBDMBusy=False):
- """
-
- You have a list of addresses (or just one) and you want to get all the
- unspent TxOuts for it. This can either be for computing its balance, or
- for sweeping the address(es).
-
- This will return a list of pairs of [addr160, utxoObj]
- This isn't the most efficient method for producing the pairs
-
- NOTE: At the moment, this only gets STANDARD TxOuts... non-std uses
- a different BDM call
-
- This method will return null output if the BDM is currently in the
- middle of a scan. You can use waitAsLongAsNecessary=True if you
- want to wait for the previous scan AND the next scan. Otherwise,
- you can check for bal==-1 and then try again later...
-
- Multi-threading update:
-
- This one-stop-shop method has to be blocking. Instead, you might want
- to register the address and rescan asynchronously, skipping this method
- entirely:
-
- cppWlt = Cpp.BtcWallet()
- cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
- TheBDM.registerScrAddr(Hash160ToScrAddr(self.getAddr160()))
- TheBDM.rescanBlockchain(wait=False)
-
- <... do some other stuff ...>
-
- if TheBDM.getBDMState()=='BlockchainReady':
- TheBDM.updateWalletsAfterScan(wait=True) # fast after a rescan
- bal = cppWlt.getBalance('Spendable')
- utxoList = cppWlt.getUnspentTxOutList()
- else:
- <...come back later...>
- """
- if TheBDM.getBDMState()=='BlockchainReady' or \
- (TheBDM.isScanning() and not abortIfBDMBusy):
- if not isinstance(addr160List, (list,tuple)):
- addr160List = [addr160List]
-
- cppWlt = Cpp.BtcWallet()
- for addr in addr160List:
- if isinstance(addr, PyBtcAddress):
- cppWlt.addScrAddress_1_(Hash160ToScrAddr(addr.getAddr160()))
- else:
- cppWlt.addScrAddress_1_(Hash160ToScrAddr(addr))
-
- TheBDM.registerWallet(cppWlt)
- currBlk = TheBDM.getTopBlockHeight()
- TheBDM.scanBlockchainForTx(cppWlt, currBlk+1 if startBlk==-1 else startBlk)
- #TheBDM.scanRegisteredTxForWallet(cppWlt, currBlk+1 if startBlk==-1 else startBlk)
-
- if utxoType.lower() in ('sweep','unspent','full','all','ultimate'):
- return cppWlt.getFullTxOutList(currBlk)
- elif utxoType.lower() in ('spend','spendable','confirmed'):
- return cppWlt.getSpendableTxOutList(currBlk)
- else:
- raise TypeError, 'Unknown utxoType!'
- else:
- return []
-
-
-
-################################################################################
-# NOTE: This method was actually used to create the Blockchain-reorg unit-
-# test, and hence why coinbase transactions are supported. However,
-# for normal transactions supported by PyBtcEngine, this support is
-# unnecessary.
-#
-# Additionally, this method both creates and signs the tx: however
-# PyBtcEngine employs TxDistProposals which require the construction
-# and signing to be two separate steps. This method is not suited
-# for most of the armoryengine CONOPS.
-#
-# On the other hand, this method DOES work, and there is no reason
-# not to use it if you already have PyBtcAddress-w-PrivKeys avail
-# and have a list of inputs and outputs as described below.
-#
-# This method will take an already-selected set of TxOuts, along with
-# PyBtcAddress objects containing necessary the private keys
-#
-# Src TxOut ~ {PyBtcAddr, PrevTx, PrevTxOutIdx} --OR-- COINBASE = -1
-# Dst TxOut ~ {PyBtcAddr, value}
-#
-# Of course, we usually don't have the private keys of the dst addrs...
-#
-def PyCreateAndSignTx(srcTxOuts, dstAddrsVals):
- newTx = PyTx()
- newTx.version = 1
- newTx.lockTime = 0
- newTx.inputs = []
- newTx.outputs = []
-
-
- numInputs = len(srcTxOuts)
- numOutputs = len(dstAddrsVals)
-
- coinbaseTx = False
- if numInputs==1 and srcTxOuts[0] == -1:
- coinbaseTx = True
-
-
- #############################
- # Fill in TxOuts first
- for i in range(numOutputs):
- txout = PyTxOut()
- txout.value = dstAddrsVals[i][1]
- dstAddr = dstAddrsVals[i][0]
- if(coinbaseTx):
- txout.binScript = ''.join([ '\x41', \
- dstAddr.binPublicKey65.toBinStr(), \
- getOpCode('OP_CHECKSIG' )])
- else:
- txout.binScript = ''.join([ getOpCode('OP_DUP' ), \
- getOpCode('OP_HASH160' ), \
- '\x14', \
- dstAddr.getAddr160(), \
- getOpCode('OP_EQUALVERIFY'), \
- getOpCode('OP_CHECKSIG' )])
-
- newTx.outputs.append(txout)
-
-
- #############################
- # Create temp TxIns with blank scripts
- for i in range(numInputs):
- txin = PyTxIn()
- txin.outpoint = PyOutPoint()
- if(coinbaseTx):
- txin.outpoint.txHash = '\x00'*32
- txin.outpoint.txOutIndex = binary_to_int('\xff'*4)
- else:
- txin.outpoint.txHash = hash256(srcTxOuts[i][1].serialize())
- txin.outpoint.txOutIndex = srcTxOuts[i][2]
- txin.binScript = ''
- txin.intSeq = 2**32-1
- newTx.inputs.append(txin)
-
-
- #############################
- # Now we apply the ultra-complicated signature procedure
- # We need a copy of the Tx with all the txin scripts blanked out
- txCopySerialized = newTx.serialize()
- for i in range(numInputs):
- if coinbaseTx:
- pass
- else:
- txCopy = PyTx().unserialize(txCopySerialized)
- srcAddr = srcTxOuts[i][0]
- txoutIdx = srcTxOuts[i][2]
- prevTxOut = srcTxOuts[i][1].outputs[txoutIdx]
- binToSign = ''
-
- assert(srcAddr.hasPrivKey())
-
- # Only implemented one type of hashing: SIGHASH_ALL
- hashType = 1 # SIGHASH_ALL
- hashCode1 = int_to_binary(1, widthBytes=1)
- hashCode4 = int_to_binary(1, widthBytes=4)
-
- # Copy the script of the TxOut we're spending, into the txIn script
- txCopy.inputs[i].binScript = prevTxOut.binScript
- preHashMsg = txCopy.serialize() + hashCode4
-
- # CppBlockUtils::CryptoECDSA modules do the hashing for us
- ##binToSign = hash256(preHashMsg)
- ##binToSign = binary_switchEndian(binToSign)
-
- signature = srcAddr.generateDERSignature(preHashMsg)
-
-
- # If we are spending a Coinbase-TxOut, only need sig, no pubkey
- # Don't forget to tack on the one-byte hashcode and consider it part of sig
- if len(prevTxOut.binScript) > 30:
- sigLenInBinary = int_to_binary(len(signature) + 1)
- newTx.inputs[i].binScript = sigLenInBinary + signature + hashCode1
- else:
- pubkey = srcAddr.binPublicKey65.toBinStr()
- sigLenInBinary = int_to_binary(len(signature) + 1)
- pubkeyLenInBinary = int_to_binary(len(pubkey) )
- newTx.inputs[i].binScript = sigLenInBinary + signature + hashCode1 + \
- pubkeyLenInBinary + pubkey
-
- #############################
- # Finally, our tx is complete!
- return newTx
-
-
-
-################################################################################
-################################################################################
-#
-# SelectCoins algorithms
-#
-# The following methods define multiple ways that one could select coins
-# for a given transaction. However, the "best" solution is extremely
-# dependent on the variety of unspent outputs, and also the preferences
-# of the user. Things to take into account when selecting coins:
-#
-# - Number of inputs: If we have a lot of inputs in this transaction
-# from different addresses, then all those addresses
-# have now been linked together. We want to use
-# as few outputs as possible
-#
-# - Tx Fess/Size: The bigger the transaction, in bytes, the more
-# fee we're going to have to pay to the miners
-#
-# - Priority: Low-priority transactions might require higher
-# fees and/or take longer to make it into the
-# blockchain. Priority is the sum of TxOut
-# priorities: (NumConfirm * NumBTC / SizeKB)
-# We especially want to avoid 0-confirmation txs
-#
-# - Output values: In almost every transaction, we must return
-# change to ourselves. This means there will
-# be two outputs, one to the recipient, one to
-# us. We prefer that both outputs be about the
-# same size, so that it's not clear which is the
-# recipient, which is the change. But we don't
-# want to use too many inputs to do this.
-#
-# - Sustainability: We should pick a strategy that tends to leave our
-# wallet containing a variety of TxOuts that are
-# well-suited for future transactions to benefit.
-# For instance, always favoring the single TxOut
-# with a value close to the target, will result
-# in a future wallet full of tiny TxOuts. This
-# guarantees that in the future, we're going to
-# have to do 10+ inputs for a single Tx.
-#
-#
-# The strategy is to execute a half dozen different types of SelectCoins
-# algorithms, each with a different goal in mind. Then we examine each
-# of the results and evaluate a "select-score." Use the one with the
-# best score. In the future, we could make the scoring algorithm based
-# on user preferences. We expect that depending on what the availble
-# list looks like, some of these algorithms could produce perfect results,
-# and in other instances *terrible* results.
-#
-################################################################################
-################################################################################
-
-################################################################################
-# These would normally be defined by C++ and fed in, but I've recreated
-# the C++ class here... it's really just a container, anyway
-#
-# TODO: LevelDB upgrade: had to upgrade this class to use arbitrary
-# ScrAddress "notation", even though everything else on the python
-# side expects pure hash160 values. For now, it looks like it can
-# handle arbitrary scripts, but the CheckHash160() calls will
-# (correctly) throw errors if you don't. We can upgrade this in
-# the future.
-class PyUnspentTxOut(object):
- def __init__(self, scrAddr='', val=-1, numConf=-1):
- pass
- #self.scrAddr = scrAddr
- #self.val = long(val*ONE_BTC)
- #self.conf = numConf
- def createFromCppUtxo(self, cppUtxo):
- self.scrAddr = cppUtxo.getRecipientScrAddr()
- self.val = cppUtxo.getValue()
- self.conf = cppUtxo.getNumConfirm()
- # For now, this will throw errors unless we always use hash160 scraddrs
- self.binScript = '\x76\xa9\x14' + CheckHash160(self.scrAddr) + '\x88\xac'
- self.txHash = cppUtxo.getTxHash()
- self.txOutIndex = cppUtxo.getTxOutIndex()
- return self
- def getTxHash(self):
- return self.txHash
- def getTxOutIndex(self):
- return self.txOutIndex
- def getValue(self):
- return self.val
- def getNumConfirm(self):
- return self.conf
- def getScript(self):
- return self.binScript
- def getRecipientScrAddr(self):
- return self.scrAddr
- def getRecipientHash160(self):
- return CheckHash160(self.scrAddr)
- def prettyStr(self, indent=''):
- pstr = [indent]
- pstr.append(binary_to_hex(self.scrAddr[:8]))
- pstr.append(coin2str(self.val))
- pstr.append(str(self.conf).rjust(8,' '))
- return ' '.join(pstr)
- def pprint(self, indent=''):
- print self.prettyStr(indent)
-
-
-################################################################################
-def sumTxOutList(txoutList):
- return sum([u.getValue() for u in txoutList])
-
-################################################################################
-# This is really just for viewing a TxOut list -- usually for debugging
-def pprintUnspentTxOutList(utxoList, headerLine='Coin Selection: '):
- totalSum = sum([u.getValue() for u in utxoList])
- print headerLine, '(Total = %s BTC)' % coin2str(totalSum)
- print ' ','Owner Address'.ljust(34),
- print ' ','TxOutValue'.rjust(18),
- print ' ','NumConf'.rjust(8),
- print ' ','PriorityFactor'.rjust(16)
- for utxo in utxoList:
- a160 = CheckHash160(utxo.getRecipientScrAddr())
- print ' ',hash160_to_addrStr(a160).ljust(34),
- print ' ',(coin2str(utxo.getValue()) + ' BTC').rjust(18),
- print ' ',str(utxo.getNumConfirm()).rjust(8),
- print ' ', ('%0.2f' % (utxo.getValue()*utxo.getNumConfirm()/(ONE_BTC*144.))).rjust(16)
-
-
-################################################################################
-# Sorting currently implemented in C++, but we implement a different kind, here
-def PySortCoins(unspentTxOutInfo, sortMethod=1):
- """
- Here we define a few different ways to sort a list of unspent TxOut objects.
- Most of them are simple, some are more complex. In particular, the last
- method (4) tries to be intelligent, by grouping together inputs from the
- same address.
-
- The goal is not to do the heavy lifting for SelectCoins... we simply need
- a few different ways to sort coins so that the SelectCoins algorithms has
- a variety of different inputs to play with. Each sorting method is useful
- for some types of unspent-TxOut lists, so as long as we have one good
- sort, the PyEvalCoinSelect method will pick it out.
-
- As a precaution we send all the zero-confirmation UTXO's to the back
- of the list, so that they will only be used if absolutely necessary.
- """
- zeroConfirm = []
-
- if sortMethod==0:
- priorityFn = lambda a: a.getValue() * a.getNumConfirm()
- return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
- if sortMethod==1:
- priorityFn = lambda a: (a.getValue() * a.getNumConfirm())**(1/3.)
- return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
- if sortMethod==2:
- priorityFn = lambda a: (math.log(a.getValue()*a.getNumConfirm()+1)+4)**4
- return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
- if sortMethod==3:
- priorityFn = lambda a: a.getValue() if a.getNumConfirm()>0 else 0
- return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
- if sortMethod==4:
- addrMap = {}
- zeroConfirm = []
- for utxo in unspentTxOutInfo:
- if utxo.getNumConfirm() == 0:
- zeroConfirm.append(utxo)
- else:
- addr = TxOutScriptExtractAddr160(utxo.getScript())
- if not addrMap.has_key(addr):
- addrMap[addr] = [utxo]
- else:
- addrMap[addr].append(utxo)
-
- priorityUTXO = (lambda a: (a.getNumConfirm()*a.getValue()**0.333))
- for addr,txoutList in addrMap.iteritems():
- txoutList.sort(key=priorityUTXO, reverse=True)
-
- priorityGrp = lambda a: max([priorityUTXO(utxo) for utxo in a])
- finalSortedList = []
- for utxo in sorted(addrMap.values(), key=priorityGrp, reverse=True):
- finalSortedList.extend(utxo)
-
- finalSortedList.extend(zeroConfirm)
- return finalSortedList
- if sortMethod in (5, 6, 7):
- utxoSorted = PySortCoins(unspentTxOutInfo, 1)
- # Rotate the top 1,2 or 3 elements to the bottom of the list
- for i in range(sortMethod-4):
- utxoSorted.append(utxoSorted[0])
- del utxoSorted[0]
- return utxoSorted
-
- # TODO: Add a semi-random sort method: it will favor putting high-priority
- # outputs at the front of the list, but will not be deterministic
- # This should give us some high-fitness variation compared to sorting
- # uniformly
- if sortMethod==8:
- utxosNoZC = filter(lambda a: a.getNumConfirm()!=0, unspentTxOutInfo)
- random.shuffle(utxosNoZC)
- utxosNoZC.extend(filter(lambda a: a.getNumConfirm()==0, unspentTxOutInfo))
- return utxosNoZC
- if sortMethod==9:
- utxoSorted = PySortCoins(unspentTxOutInfo, 1)
- sz = len(filter(lambda a: a.getNumConfirm()!=0, utxoSorted))
- # swap 1/3 of the values at random
- topsz = int(min(max(round(sz/3), 5), sz))
- for i in range(topsz):
- pick1 = int(random.uniform(0,topsz))
- pick2 = int(random.uniform(0,sz-topsz))
- utxoSorted[pick1], utxoSorted[pick2] = utxoSorted[pick2], utxoSorted[pick1]
- return utxoSorted
-
-
-
-
-################################################################################
-# Now we try half a dozen different selection algorithms
-################################################################################
-
-
-
-################################################################################
-def PySelectCoins_SingleInput_SingleValue( \
- unspentTxOutInfo, targetOutVal, minFee=0):
- """
- This method should usually be called with a small number added to target val
- so that a tx can be constructed that has room for user to add some extra fee
- if necessary.
-
- However, we must also try calling it with the exact value, in case the user
- is trying to spend exactly their remaining balance.
- """
- target = targetOutVal + minFee
- bestMatchVal = 2**64
- bestMatchUtxo = None
- for utxo in unspentTxOutInfo:
- if target <= utxo.getValue() < bestMatchVal:
- bestMatchVal = utxo.getValue()
- bestMatchUtxo = utxo
-
- closeness = bestMatchVal - target
- if 0 < closeness <= CENT:
- # If we're going to have a change output, make sure it's above CENT
- # to avoid a mandatory fee
- try2Val = 2**64
- try2Utxo = None
- for utxo in unspentTxOutInfo:
- if target+CENT < utxo.getValue() < try2Val:
- try2Val = utxo.getValue()
- try2Val = utxo
- if not try2Utxo==None:
- bestMatchUtxo = try2Utxo
-
-
- if bestMatchUtxo==None:
- return []
- else:
- return [bestMatchUtxo]
-
-################################################################################
-def PySelectCoins_MultiInput_SingleValue( \
- unspentTxOutInfo, targetOutVal, minFee=0):
- """
- This method should usually be called with a small number added to target val
- so that a tx can be constructed that has room for user to add some extra fee
- if necessary.
-
- However, we must also try calling it with the exact value, in case the user
- is trying to spend exactly their remaining balance.
- """
- target = targetOutVal + minFee
- outList = []
- sumVal = 0
- for utxo in unspentTxOutInfo:
- sumVal += utxo.getValue()
- outList.append(utxo)
- if sumVal>=target:
- break
-
- return outList
-
-
-
-################################################################################
-def PySelectCoins_SingleInput_DoubleValue( \
- unspentTxOutInfo, targetOutVal, minFee=0):
- """
- We will look for a single input that is within 30% of the target
- In case the tx value is tiny rel to the fee: the minTarget calc
- may fail to exceed the actual tx size needed, so we add an extra
-
- We restrain the search to 25%. If there is no one output in this
- range, then we will return nothing, and the SingleInput_SingleValue
- method might return a usable result
- """
- idealTarget = 2*targetOutVal + minFee
-
- # check to make sure we're accumulating enough
- minTarget = long(0.75 * idealTarget)
- minTarget = max(minTarget, targetOutVal+minFee)
- maxTarget = long(1.25 * idealTarget)
-
- if sum([u.getValue() for u in unspentTxOutInfo]) < minTarget:
- return []
-
- bestMatch = 2**64-1
- bestUTXO = None
- for txout in unspentTxOutInfo:
- if minTarget <= txout.getValue() <= maxTarget:
- if abs(txout.getValue()-idealTarget) < bestMatch:
- bestMatch = abs(txout.getValue()-idealTarget)
- bestUTXO = txout
-
- if bestUTXO==None:
- return []
- else:
- return [bestUTXO]
-
-################################################################################
-def PySelectCoins_MultiInput_DoubleValue( \
- unspentTxOutInfo, targetOutVal, minFee=0):
-
- idealTarget = 2.0 * targetOutVal
- minTarget = long(0.80 * idealTarget)
- minTarget = max(minTarget, targetOutVal+minFee)
- if sum([u.getValue() for u in unspentTxOutInfo]) < minTarget:
- return []
-
- outList = []
- lastDiff = 2**64-1
- sumVal = 0
- for utxo in unspentTxOutInfo:
- sumVal += utxo.getValue()
- outList.append(utxo)
- currDiff = abs(sumVal - idealTarget)
- # should switch from decreasing to increasing when best match
- if sumVal>=minTarget and currDiff>lastDiff:
- del outList[-1]
- break
- lastDiff = currDiff
-
- return outList
-
-
-
-
-################################################################################
-def getSelectCoinsScores(utxoSelectList, targetOutVal, minFee):
- """
- Define a metric for scoring the output of SelectCoints. The output of
- this method is a tuple of scores which identify a few different factors
- of a txOut selection that users might care about in a selectCoins algorithm.
-
- This method only returns an absolute score, usually between 0 and 1 for
- each factor. It is up to the person calling this method to decide how
- much "weight" they want to give each one. You could even use the scores
- as multiplicative factors if you wanted, though they were designed with
- the following equation in mind: finalScore = sum(WEIGHT[i] * SCORE[i])
-
- TODO: I need to recalibrate some of these factors, and modify them to
- represent more directly what the user would be concerned about --
- such as PayFeeFactor, AnonymityFactor, etc. The information is
- indirectly available with the current set of factors here
- """
-
- # Need to calculate how much the change will be returned to sender on this tx
- totalIn = sum([utxo.getValue() for utxo in utxoSelectList])
- totalChange = totalIn - (targetOutVal+minFee)
-
- # Abort if this is an empty list (negative score) or not enough coins
- if len(utxoSelectList)==0 or totalIn<targetOutVal+minFee:
- return -1
-
- # [extraction-garbled span: the zero-confirmation, linked-address and
- # output-anonymity sub-score computations that originally appeared here
- # were lost; only the trailing comment fragment "(>0)" survived]
- #
- # On the other hand, if we have 1.832 and 10.00, and the 10.000 is the
- # change, we don't really care that they're not close, it's still
- # damned good/deceptive output anonymity (so: only execute
- # the following block if outAnonFactor <= 1)
- if 0 < outAnonFactor <= 1 and not totalChange==0:
- outValDiff = abs(totalChange - targetOutVal)
- diffPct = (outValDiff / max(totalChange, targetOutVal))
- if diffPct < 0.20:
- outAnonFactor *= 1
- elif diffPct < 0.50:
- outAnonFactor *= 0.7
- elif diffPct < 1.0:
- outAnonFactor *= 0.3
- else:
- outAnonFactor = 0
-
-
- ##################
- # Tx size: we don't have signatures yet, but we assume that each txin is
- # about 180 Bytes, TxOuts are 35, and 10 other bytes in the Tx
- numBytes = 10
- numBytes += 180 * len(utxoSelectList)
- numBytes += 35 * (1 if totalChange==0 else 2)
- txSizeFactor = 0
- numKb = int(numBytes / 1000)
- # Will compute size factor after we see this tx priority and AllowFree
- # results. If the tx qualifies for free, we don't need to penalize
- # a 3 kB transaction vs one that is 0.5 kB
-
-
- ##################
- # Priority: If our priority is above the 1-btc-after-1-day threshold
- # then we might be allowed a free tx. But, if its priority
- # isn't much above this thresh, it might take a couple blocks
- # to be included
- dPriority = 0
- anyZeroConfirm = False
- for utxo in utxoSelectList:
- if utxo.getNumConfirm() == 0:
- anyZeroConfirm = True
- else:
- dPriority += utxo.getValue() * utxo.getNumConfirm()
-
- dPriority = dPriority / numBytes
- priorityThresh = ONE_BTC * 144 / 250
- if dPriority < priorityThresh:
- priorityFactor = 0
- elif dPriority < 10.0*priorityThresh:
- priorityFactor = 0.7
- elif dPriority < 100.0*priorityThresh:
- priorityFactor = 0.9
- else:
- priorityFactor = 1.0
-
-
- ##################
- # AllowFree: If three conditions are met, then the tx can be sent safely
- # without a tx fee. Granted, it may not be included in the
- # current block if the free space is full, but definitely in
- # the next one
- isFreeAllowed = 0
- haveDustOutputs = (0<totalChange<CENT)
- if ((not haveDustOutputs) and \
- dPriority >= priorityThresh and \
- numBytes <= 10000):
- isFreeAllowed = 1
-
-
- ##################
- # Finish size-factor calculation -- if free is allowed, kB is irrelevant
- txSizeFactor = 0
- if isFreeAllowed or numKb<1:
- txSizeFactor = 1
- else:
- if numKb < 2:
- txSizeFactor=0.2
- elif numKb<3:
- txSizeFactor=0.1
- elif numKb<4:
- txSizeFactor=0
- else:
- txSizeFactor=-1 #if this is huge, actually subtract score
-
- return (isFreeAllowed, noZeroConf, priorityFactor, numAddrFactor, txSizeFactor, outAnonFactor)
-
-
-################################################################################
-# We define default preferences for weightings. Weightings are used to
-# determine the "priorities" for ranking various SelectCoins results
-# By setting the weights to different orders of magnitude, you are essentially
-# defining a sort-order: order by FactorA, then sub-order by FactorB...
-################################################################################
-# TODO: ADJUST WEIGHTING!
-IDX_ALLOWFREE = 0
-IDX_NOZEROCONF = 1
-IDX_PRIORITY = 2
-IDX_NUMADDR = 3
-IDX_TXSIZE = 4
-IDX_OUTANONYM = 5
-WEIGHTS = [None]*6
-WEIGHTS[IDX_ALLOWFREE] = 100000
-WEIGHTS[IDX_NOZEROCONF] = 1000000 # let's avoid zero-conf if possible
-WEIGHTS[IDX_PRIORITY] = 50
-WEIGHTS[IDX_NUMADDR] = 100000
-WEIGHTS[IDX_TXSIZE] = 100
-WEIGHTS[IDX_OUTANONYM] = 30
-
-
-################################################################################
-def PyEvalCoinSelect(utxoSelectList, targetOutVal, minFee, weights=WEIGHTS):
- """
- Use a specified set of weightings and sub-scores for a unspentTxOut list,
- to assign an absolute "fitness" of this particular selection. The goal of
- getSelectCoinsScores() is to produce weighting-agnostic subscores -- then
- this method applies the weightings to these scores to get a final answer.
-
- If list A has a higher score than list B, then it's a better selection for
- that transaction. If you the two scores don't look right to you, then you
- probably just need to adjust the weightings to your liking.
-
- These weightings may become user-configurable in the future -- likely as an
- option of coin-selection profiles -- such as "max anonymity", "min fee",
- "balanced", etc).
- """
- scores = getSelectCoinsScores(utxoSelectList, targetOutVal, minFee)
- if scores==-1:
- return -1
-
- # Combine all the scores
- theScore = 0
- theScore += weights[IDX_NOZEROCONF] * scores[IDX_NOZEROCONF]
- theScore += weights[IDX_PRIORITY] * scores[IDX_PRIORITY]
- theScore += weights[IDX_NUMADDR] * scores[IDX_NUMADDR]
- theScore += weights[IDX_TXSIZE] * scores[IDX_TXSIZE]
- theScore += weights[IDX_OUTANONYM] * scores[IDX_OUTANONYM]
-
- # If we're already paying a fee, why bother including this weight?
- if minFee < 0.0005:
- theScore += weights[IDX_ALLOWFREE] * scores[IDX_ALLOWFREE]
-
- return theScore
-
-
-################################################################################
-def PySelectCoins(unspentTxOutInfo, targetOutVal, minFee=0, numRand=10, margin=CENT):
- """
- Intense algorithm for coin selection: computes about 30 different ways to
- select coins based on the desired target output and the min tx fee. Then
- ranks the various solutions and picks the best one
- """
-
- TimerStart('PySelectCoins')
-
- if sum([u.getValue() for u in unspentTxOutInfo]) < targetOutVal:
- return []
-
- targExact = targetOutVal
- targMargin = targetOutVal+margin
-
- selectLists = []
-
- # Start with the intelligent solutions with different sortings
- for sortMethod in range(8):
- diffSortList = PySortCoins(unspentTxOutInfo, sortMethod)
- selectLists.append(PySelectCoins_SingleInput_SingleValue( diffSortList, targExact, minFee ))
- selectLists.append(PySelectCoins_MultiInput_SingleValue( diffSortList, targExact, minFee ))
- selectLists.append(PySelectCoins_SingleInput_SingleValue( diffSortList, targMargin, minFee ))
- selectLists.append(PySelectCoins_MultiInput_SingleValue( diffSortList, targMargin, minFee ))
- selectLists.append(PySelectCoins_SingleInput_DoubleValue( diffSortList, targExact, minFee ))
- selectLists.append(PySelectCoins_MultiInput_DoubleValue( diffSortList, targExact, minFee ))
- selectLists.append(PySelectCoins_SingleInput_DoubleValue( diffSortList, targMargin, minFee ))
- selectLists.append(PySelectCoins_MultiInput_DoubleValue( diffSortList, targMargin, minFee ))
-
- # Throw in a couple random solutions, maybe we get lucky
- # But first, make a copy before in-place shuffling
- # NOTE: using list[:] like below, really causes a swig::vector to freak out!
- #utxos = unspentTxOutInfo[:]
- #utxos = list(unspentTxOutInfo)
- for method in range(8,10):
- for i in range(numRand):
- utxos = PySortCoins(unspentTxOutInfo, method)
- selectLists.append(PySelectCoins_MultiInput_SingleValue(utxos, targExact, minFee))
- selectLists.append(PySelectCoins_MultiInput_DoubleValue(utxos, targExact, minFee))
- selectLists.append(PySelectCoins_MultiInput_SingleValue(utxos, targMargin, minFee))
- selectLists.append(PySelectCoins_MultiInput_DoubleValue(utxos, targMargin, minFee))
-
- # Now we define PyEvalCoinSelect as our sorting metric, and find the best solution
- scoreFunc = lambda ulist: PyEvalCoinSelect(ulist, targetOutVal, minFee)
- finalSelection = max(selectLists, key=scoreFunc)
- SCORES = getSelectCoinsScores(finalSelection, targetOutVal, minFee)
- if len(finalSelection)==0:
- return []
-
- # If we selected a list that has only one or two inputs, and we have
- # other, tiny, unspent outputs from the same addresses, we should
- # throw one or two of them in to help clear them out. However, we
- # only do so if a plethora of conditions exist:
- #
- # First, we only consider doing this if the tx has <5 inputs already.
- # Also, we skip this process if the current tx doesn't have excessive
- # priority already -- we don't want to risk de-prioritizing a tx for
- # this purpose.
- #
- # Next we sort by LOWEST value, because we really benefit from this most
- # by clearing out tiny outputs. Along those lines, we don't even do
- # unless it has low priority -- don't want to take a high-priority utxo
- # and convert it to one that will be low-priority to start.
- #
- # Finally, we shouldn't do this if a high score was assigned to output
- # anonymity: this extra output may cause a tx with good output anonymity
- # to no longer possess this property
- IDEAL_NUM_INPUTS = 5
- if len(finalSelection) < IDEAL_NUM_INPUTS and \
- SCORES[IDX_OUTANONYM] == 0:
-
- utxoToHash160 = lambda a: CheckHash160(a.getRecipientScrAddr())
- getPriority = lambda a: a.getValue() * a.getNumConfirm()
- getUtxoID = lambda a: a.getTxHash() + int_to_binary(a.getTxOutIndex())
-
- alreadyUsedAddr = set( [utxoToHash160(utxo) for utxo in finalSelection] )
- utxoSmallToLarge = sorted(unspentTxOutInfo, key=getPriority)
- utxoSmToLgIDs = [getUtxoID(utxo) for utxo in utxoSmallToLarge]
- finalSelectIDs = [getUtxoID(utxo) for utxo in finalSelection]
-
- for other in utxoSmallToLarge:
-
- # Skip it if it is already selected
- if getUtxoID(other) in finalSelectIDs:
- continue
-
- # We only consider UTXOs that won't link any new addresses together
- if not utxoToHash160(other) in alreadyUsedAddr:
- continue
-
- # Avoid zero-conf inputs altogether
- if other.getNumConfirm() == 0:
- continue
-
- # Don't consider any inputs that are high priority already
- if getPriority(other) > ONE_BTC*144:
- continue
-
- finalSelection.append(other)
- if len(finalSelection)>=IDEAL_NUM_INPUTS:
- break
-
- TimerStop('PySelectCoins')
-
- return finalSelection
-
-
-def calcMinSuggestedFees(selectCoinsResult, targetOutVal, preSelectedFee):
- """
- Returns two fee options: one for relay, one for include-in-block.
- In general, relay fees are required to get your block propagated
- (since most nodes are Satoshi clients), but there's no guarantee
- it will be included in a block -- though I'm sure there's plenty
- of miners out there will include your tx for sub-standard fee.
- However, it's virtually guaranteed that a miner will accept a fee
- equal to the second return value from this method.
-
- We have to supply the fee that was used in the selection algorithm,
- so that we can figure out how much change there will be. Without
- this information, we might accidentally declare a tx to be freeAllow
- when it actually is not.
- """
-
- if len(selectCoinsResult)==0:
- return [-1,-1]
-
- paid = targetOutVal + preSelectedFee
- change = sum([u.getValue() for u in selectCoinsResult]) - paid
-
- # Calc approx tx size
- numBytes = 10
- numBytes += 180 * len(selectCoinsResult)
- numBytes += 35 * (1 if change==0 else 2)
- numKb = int(numBytes / 1000)
-
- if numKb>10:
- return [(1+numKb)*MIN_RELAY_TX_FEE, (1+numKb)*MIN_TX_FEE]
-
- # Compute raw priority of tx
- prioritySum = 0
- for utxo in selectCoinsResult:
- prioritySum += utxo.getValue() * utxo.getNumConfirm()
- prioritySum = prioritySum / numBytes
-
- # Any tiny/dust outputs?
- haveDustOutputs = (0<change<CENT)
-
- if((not haveDustOutputs) and \
- prioritySum >= ONE_BTC * 144 / 250. and \
- numBytes < 10000):
- return [0,0]
-
- # This cannot be a free transaction.
- minFeeMultiplier = (1 + numKb)
-
- # At the moment this condition never triggers
- if minFeeMultiplier<1.0 and haveDustOutputs:
- minFeeMultiplier = 1.0
-
-
- return [minFeeMultiplier * MIN_RELAY_TX_FEE, \
- minFeeMultiplier * MIN_TX_FEE]
-
-
-
-
-
-
-################################################################################
-################################################################################
-# This class can be used for both multi-signature tx collection, as well as
-# offline wallet signing (you are collecting signatures for a 1-of-1 tx only
-# involving yourself).
-class PyTxDistProposal(object):
- """
- PyTxDistProposal is created from a PyTx object, and represents
- an unsigned transaction, that may require the signatures of
- multiple parties before being accepted by the network.
-
- This technique (https://en.bitcoin.it/wiki/BIP_0010) is that
- once TxDP is created, the system signing it only needs the
- ECDSA private keys and nothing else. This enables the device
- providing the signatures to be extremely lightweight, since it
- doesn't have to store the blockchain.
-
- For a given TxDP, we will be storing the following structure
- in memory. Use a 3-input tx as an example, with the first
- being a 2-of-3 multi-sig transaction (unsigned)
-
- self.scriptTypes = [TXOUT_SCRIPT_MULTISIG,
- TXOUT_SCRIPT_STANDARD,
- TXOUT_SCRIPT_STANDARD]
-
- self.inputValues = [ 2313000000,
- 400000000,
- 1000000000]
-
- self.signatures = [ ['', '', ''],
- [''],
- [''], ]
-
- self.inAddr20Lists = [ [addr1, addr2, addr3],
- [addr4]
- [addr5] ]
-
- # Usually only have public keys on multi-sig TxOuts
- self.inPubKeyLists = [ [pubKey1, pubKey2, pubKey3],
- ['']
- [''] ]
-
- self.numSigsNeeded = [ 2
- 1
- 1 ]
-
- self.relevantTxMap = [ prevTx0Hash: prevTx0.serialize(),
- prevTx1Hash: prevTx1.serialize(),
- prevTx2Hash: prevTx2.serialize() ]
-
- UPDATE Feb 2012: Before Jan 29, 2012, BIP 0010 used a different technique
- for communicating blockchain information to the offline
- device. This is no longer the case
-
- Gregory Maxwell identified a reasonable-enough security
- risk with the fact that previous BIP 0010 cannot guarantee
- validity of stated input values in a TxDP. This is solved
- by adding the supporting transactions to the TxDP, so that
- the signing device can get the input values from those
- tx and verify the hash matches the OutPoint on the tx
- being signed (which *is* part of what's being signed).
- The concern was that someone could manipulate your online
- computer to misrepresent the inputs, and cause you to
- send you entire wallet to tx-fees. Not the most useful
- attack (for someone trying to steal your coins), but it is
- still a risk that can be avoided by adding some "bloat" to
- the TxDP
-
-
-
- """
- #############################################################################
- def __init__(self, pytx=None, txMap={}):
- self.pytxObj = UNINITIALIZED
- self.uniqueB58 = ''
- self.scriptTypes = []
- self.signatures = []
- self.txOutScripts = []
- self.inAddr20Lists = []
- self.inPubKeyLists = []
- self.inputValues = []
- self.numSigsNeeded = []
- self.relevantTxMap = {} # needed to support input values of each TxIn
- if pytx:
- self.createFromPyTx(pytx, txMap)
-
- #############################################################################
- def createFromPyTx(self, pytx, txMap={}):
- sz = len(pytx.inputs)
- self.pytxObj = pytx.copy()
- self.uniqueB58 = binary_to_base58(hash256(pytx.serialize()))[:8]
- self.scriptTypes = []
- self.signatures = []
- self.txOutScripts = []
- self.inAddr20Lists = []
- self.inPubKeyLists = []
- self.inputValues = []
- self.numSigsNeeded = []
- self.relevantTxMap = {} # needed to support input values of each TxIn
-
- if len(txMap)==0 and not TheBDM.getBDMState()=='BlockchainReady':
- # TxDP includes the transactions that supply the inputs to this
- # transaction, so the BDM needs to be available to fetch those.
- raise BlockchainUnavailableError, ('Must input supporting transactions '
- 'or access to the blockchain, to '
- 'create the TxDP')
- for i in range(sz):
- # First, make sure that we have the previous Tx data available
- # We can't continue without it, since BIP 0010 will now require
- # the full tx of outputs being spent
- outpt = self.pytxObj.inputs[i].outpoint
- txhash = outpt.txHash
- txidx = outpt.txOutIndex
- pyPrevTx = None
- if len(txMap)>0:
- # If supplied a txMap, we expect it to have everything we need
- if not txMap.has_key(txhash):
- raise InvalidHashError, ('Could not find the referenced tx '
- 'in supplied txMap')
- pyPrevTx = txMap[txhash].copy()
- elif TheBDM.getBDMState()=='BlockchainReady':
- cppPrevTx = TheBDM.getTxByHash(txhash)
- if not cppPrevTx:
- raise InvalidHashError, 'Could not find the referenced tx'
- pyPrevTx = PyTx().unserialize(cppPrevTx.serialize())
- else:
- raise InvalidHashError, 'No previous-tx data available for TxDP'
- self.relevantTxMap[txhash] = pyPrevTx.copy()
-
-
- # Now we have the previous transaction. We need to pull the
- # script out of the specific TxOut so we know how it can be
- # spent.
- script = pyPrevTx.outputs[txidx].binScript
- value = pyPrevTx.outputs[txidx].value
- scrType = getTxOutScriptType(script)
-
- self.inputValues.append(value)
- self.txOutScripts.append(str(script)) # copy it
- self.scriptTypes.append(scrType)
- self.inAddr20Lists.append([])
- self.inPubKeyLists.append([])
- self.signatures.append([])
- if scrType in (TXOUT_SCRIPT_STANDARD, TXOUT_SCRIPT_COINBASE):
- self.inAddr20Lists[-1].append(TxOutScriptExtractAddr160(script))
- self.inPubKeyLists[-1].append('')
- self.signatures[-1].append('')
- self.numSigsNeeded.append(1)
- elif scrType==TXOUT_SCRIPT_MULTISIG:
- mstype, addrs, pubs = getTxOutMultiSigInfo(script)
- self.inAddr20Lists[-1] = addrs
- self.inPubKeyLists[-1] = pubs
- self.signatures[-1] = ['']*len(addrs)
- self.numSigsNeeded[-1] = mstype[0] # mstype for M-of-N tx is (M,N)
- elif scrType in (TXOUT_SCRIPT_OP_EVAL, TXOUT_SCRIPT_UNKNOWN):
- pass
-
- return self
-
-
- #############################################################################
- def createFromTxOutSelection(self, utxoSelection, recip160ValPairs, txMap={}):
- """
- This creates a TxDP for a standard transaction from a list of inputs and
- a list of recipient-value-pairs.
-
- NOTE: I have modified this so that if the "recip" is not a 20-byte binary
- string, it is instead interpretted as a SCRIPT -- which could be
- anything, including a multi-signature transaction
- """
-
- pprintUnspentTxOutList(utxoSelection)
- #print sumTxOutList(utxoSelection)
- #print sum([a[1] for a in recip160ValPairs])
- assert(sumTxOutList(utxoSelection) >= sum([a[1] for a in recip160ValPairs]))
- thePyTx = PyTx()
- thePyTx.version = 1
- thePyTx.lockTime = 0
- thePyTx.inputs = []
- thePyTx.outputs = []
-
- # We can prepare the outputs, first
- for recipObj,value in recip160ValPairs:
- txout = PyTxOut()
- txout.value = long(value)
-
- # Assume recipObj is either a PBA or a string
- if isinstance(recipObj, PyBtcAddress):
- recipObj = recipObj.getAddr160()
-
- # Now recipObj is def a string
- if len(recipObj)!=20:
- # If not an address, it's a full script
- txout.binScript = recipObj
- else:
- # Construct a std TxOut from addr160 str
- txout.binScript = ''.join([ getOpCode('OP_DUP' ), \
- getOpCode('OP_HASH160' ), \
- '\x14', \
- recipObj,
- getOpCode('OP_EQUALVERIFY'), \
- getOpCode('OP_CHECKSIG' )])
- thePyTx.outputs.append(txout)
-
- # Prepare the inputs based on the utxo objects
- for iin,utxo in enumerate(utxoSelection):
- # First, make sure that we have the previous Tx data available
- # We can't continue without it, since BIP 0010 will now require
- # the full tx of outputs being spent
- txin = PyTxIn()
- txin.outpoint = PyOutPoint()
- txin.binScript = ''
- txin.intSeq = 2**32-1
-
- txhash = utxo.getTxHash()
- txidx = utxo.getTxOutIndex()
- txin.outpoint.txHash = str(txhash)
- txin.outpoint.txOutIndex = txidx
- thePyTx.inputs.append(txin)
-
- return self.createFromPyTx(thePyTx, txMap)
-
-
-
- #############################################################################
- def appendSignature(self, binSig, txinIndex=None):
- """
- Use this to add a signature to the TxDP object in memory.
- """
- idx, pos, addr = self.processSignature(binSig, txinIndex, checkAllInputs=True)
- if addr:
- self.signatures[validIdx].append(binSig)
- return True
-
- return False
-
-
- #############################################################################
- def processSignature(self, sigStr, txinIdx, checkAllInputs=False):
- """
- For standard transaction types, the signature field is actually the raw
- script to be plugged into the final transaction that allows it to eval
- to true -- except for multi-sig transactions. We have to mess with the
- data a little bit if we want to use the script-processor to verify the
- signature. Instead, we will use the crypto ops directly.
-
- The return value is everything we need to know about this signature:
- -- TxIn-index: if checkAllInputs=True, we need to know which one worked
- -- Addr-position: for multi-sig tx, we need to know which addr it matches
- -- Addr160: address to which this signature corresponds
- """
-
- if txinIdx==None or txinIdx<0 or txinIdx>=len(self.pytxObj.inputs):
- pass
- else:
- scriptType = self.scriptTypes[txinIdx]
- txCopy = self.pytxObj.copy()
- if scriptType in (TXOUT_SCRIPT_STANDARD, TXOUT_SCRIPT_COINBASE):
- # For standard Tx types, sigStr is the full script itself (copy it)
- txCopy.inputs[txinIdx].binScript = str(sigStr)
- prevOutScript = str(self.txOutScripts[txinIdx])
- psp = PyScriptProcessor(prevOutScript, txCopy, txinIdx)
- if psp.verifyTransactionValid():
- return txinIdx, 0, TxOutScriptExtractAddr160(prevOutScript)
- elif scriptType == TXOUT_SCRIPT_MULTISIG:
- # For multi-sig, sigStr is the raw ECDSA sig ... we will have to
- # manually construct a tx that the script processor can check,
- # without the other signatures
- for i in range(len(txCopy.inputs)):
- if not i==idx:
- txCopy.inputs[i].binScript = ''
- else:
- txCopy.inputs[i].binScript = self.txOutScripts[i]
-
- hashCode = binary_to_int(sigStr[-1])
- hashCode4 = int_to_binary(hashcode, widthBytes=4)
- preHashMsg = txCopy.serialize() + hashCode4
- if not hashCode==1:
- raise NotImplementedError, 'Non-standard hashcodes not supported!'
-
- # Now check all public keys in the multi-sig TxOut script
- for i,pubkey in enumerate(self.inPubKeyLists):
- tempAddr = PyBtcAddress().createFromPublicKeyData(pubkey)
- if tempAddr.verifyDERSignature(preHashMsg, sigStr):
- return txInIdx, i, hash160(pubkey)
-
-
- if checkAllInputs:
- for i in range(len(self.pytxObj.inputs)):
- idx, pos, addr160 = self.processSignature(sigStr, i)
- if idx>0:
- return idx, pos, addr160
-
- return -1,-1,''
-
-
- #############################################################################
- def checkTxHasEnoughSignatures(self, alsoVerify=False):
- """
- This method only counts signatures, unless verify==True
- """
- for i in range(len(self.pytxObj.inputs)):
- numSigsHave = sum( [(1 if sig else 0) for sig in self.signatures[i]] )
- if numSigsHave < self.numSigsNeeded[i]:
- return False
-
- # [extraction-garbled span: the remainder of checkTxHasEnoughSignatures,
- # all of serializeAscii, and the start of unserializeAscii were lost here]
-
- while binUnpacker.getRemainingSize() > 0:
- nextTx = PyTx().unserialize(binUnpacker)
- self.relevantTxMap[nextTx.getHash()] = nextTx
-
- for txin in targetTx.inputs:
- if not self.relevantTxMap.has_key(txin.outpoint.txHash):
- raise TxdpError, 'Not all inputs can be verified for TxDP. Aborting!'
-
- self.createFromPyTx( targetTx, self.relevantTxMap )
- numIn = len(self.pytxObj.inputs)
-
- # Do some sanity checks
- if not self.uniqueB58 == dpIdB58:
- raise UnserializeError, 'TxDP: Actual DPID does not match listed ID'
- if not MAGIC_BYTES==magic:
- raise NetworkIDError, 'TxDP is for diff blockchain! (%s)' % \
- BLOCKCHAINS[magic]
-
- # At this point, we should have a TxDP constructed, now we need to
- # simply scan the rest of the serialized structure looking for any
- # signatures that may be included
- while not 'END-TRANSACTION' in line:
- [iin, val] = line.split('_')[2:]
- iin = int(iin)
- self.inputValues[iin] = str2coin(val)
-
- line = nextLine(L)
- while '_SIG_' in line:
- addrB58, sz, sigszHex = line.split('_')[2:]
- sz = int(sz)
- sigsz = hex_to_int(sigszHex, endIn=BIGENDIAN)
- hexSig = ''
- line = nextLine(L)
- while (not '_SIG_' in line) and \
- (not 'TXINPUT' in line) and \
- (not 'END-TRANSACTION' in line):
- hexSig += line
- line = nextLine(L)
- binSig = hex_to_binary(hexSig)
- idx, sigOrder, addr160 = self.processSignature(binSig, iin)
- if idx == -1:
- LOGWARN('Invalid sig: Input %d, addr=%s' % (iin, addrB58))
- elif not hash160_to_addrStr(addr160)== addrB58:
- LOGERROR('Listed addr does not match computed addr')
- raise BadAddressError
- # If we got here, the signature is valid!
- self.signatures[iin][sigOrder] = binSig
-
- return self
-
-
-
- #############################################################################
- def pprint(self, indent=' '):
- tx = self.pytxObj
- propID = hash256(tx.serialize())
- print indent+'Distribution Proposal : ', binary_to_base58(propID)[:8]
- print indent+'Transaction Version : ', tx.version
- print indent+'Transaction Lock Time : ', tx.lockTime
- print indent+'Num Inputs : ', len(tx.inputs)
- for i,txin in enumerate(tx.inputs):
- prevHash = txin.outpoint.txHash
- prevIndex = txin.outpoint.txOutIndex
- #print ' PrevOut: (%s, index=%d)' % (binary_to_hex(prevHash[:8]),prevIndex),
- print indent*2 + 'Value: %s' % self.inputValues[i]
- print indent*2 + 'SrcScript: %s' % binary_to_hex(self.txOutScripts[i])
- for ns, sig in enumerate(self.signatures[i]):
- print indent*2 + 'Sig%d = "%s"'%(ns, binary_to_hex(sig))
- print indent+'Num Outputs : ', len(tx.outputs)
- for i,txout in enumerate(tx.outputs):
- print ' Recipient: %s BTC' % coin2str(txout.value),
- scrType = getTxOutScriptType(txout.binScript)
- if scrType in (TXOUT_SCRIPT_STANDARD, TXOUT_SCRIPT_COINBASE):
- print hash160_to_addrStr(TxOutScriptExtractAddr160(txout.binScript))
- elif scrType in (TXOUT_SCRIPT_MULTISIG,):
- mstype, addrs, pubs = getTxOutMultiSigInfo(txout.binScript)
- print 'MULTI-SIG-SCRIPT:%d-of-%d' % mstype
- for addr in addrs:
- print indent*2, hash160_to_addrStr(addr)
-
-
-
-# Random method for creating
-def touchFile(fname):
- try:
- os.utime(fname, None)
- except:
- f = open(fname, 'a')
- f.flush()
- os.fsync(f.fileno())
- f.close()
-
-BLOCKCHAIN_READONLY = 0
-BLOCKCHAIN_READWRITE = 1
-BLOCKCHAIN_DONOTUSE = 2
-
-WLT_UPDATE_ADD = 0
-WLT_UPDATE_MODIFY = 1
-
-WLT_DATATYPE_KEYDATA = 0
-WLT_DATATYPE_ADDRCOMMENT = 1
-WLT_DATATYPE_TXCOMMENT = 2
-WLT_DATATYPE_OPEVAL = 3
-WLT_DATATYPE_DELETED = 4
-
-DEFAULT_COMPUTE_TIME_TARGET = 0.25
-DEFAULT_MAXMEM_LIMIT = 32*1024*1024
-
-
-#############################################################################
-def DeriveChaincodeFromRootKey(sbdPrivKey):
- return SecureBinaryData( HMAC256( sbdPrivKey.getHash256(), \
- 'Derive Chaincode from Root Key'))
-
-
-################################################################################
-def HardcodedKeyMaskParams():
- paramMap = {}
-
- # Nothing up my sleeve! Need some hardcoded random numbers to use for
- # encryption IV and salt. Using the first 256 digits of Pi for the
- # the IV, and first 256 digits of e for the salt (hashed)
- digits_pi = ( \
- 'ARMORY_ENCRYPTION_INITIALIZATION_VECTOR_'
- '1415926535897932384626433832795028841971693993751058209749445923'
- '0781640628620899862803482534211706798214808651328230664709384460'
- '9550582231725359408128481117450284102701938521105559644622948954'
- '9303819644288109756659334461284756482337867831652712019091456485')
- digits_e = ( \
- 'ARMORY_KEY_DERIVATION_FUNCTION_SALT_'
- '7182818284590452353602874713526624977572470936999595749669676277'
- '2407663035354759457138217852516642742746639193200305992181741359'
- '6629043572900334295260595630738132328627943490763233829880753195'
- '2510190115738341879307021540891499348841675092447614606680822648')
-
- paramMap['IV'] = SecureBinaryData( hash256(digits_pi)[:16] )
- paramMap['SALT'] = SecureBinaryData( hash256(digits_e) )
- paramMap['KDFBYTES'] = long(16*MEGABYTE)
-
- def hardcodeCreateSecurePrintPassphrase(secret):
- if isinstance(secret, basestring):
- secret = SecureBinaryData(secret)
- bin7 = HMAC512(secret.getHash256(), paramMap['SALT'].toBinStr())[:7]
- out,bin7 = SecureBinaryData(binary_to_base58(bin7 + hash256(bin7)[0])), None
- return out
-
- def hardcodeCheckPassphrase(passphrase):
- if isinstance(passphrase, basestring):
- pwd = base58_to_binary(passphrase)
- else:
- pwd = base58_to_binary(passphrase.toBinStr())
-
- isgood,pwd = (hash256(pwd[:7])[0] == pwd[-1]), None
- return isgood
-
- def hardcodeApplyKdf(secret):
- if isinstance(secret, basestring):
- secret = SecureBinaryData(secret)
- kdf = KdfRomix()
- kdf.usePrecomputedKdfParams(paramMap['KDFBYTES'], 1, paramMap['SALT'])
- return kdf.DeriveKey(secret)
-
- def hardcodeMask(secret, passphrase=None, ekey=None):
- if not ekey:
- ekey = hardcodeApplyKdf(passphrase)
- return CryptoAES().EncryptCBC(secret, ekey, paramMap['IV'])
-
- def hardcodeUnmask(secret, passphrase=None, ekey=None):
- if not ekey:
- ekey = applyKdf(passphrase)
- return CryptoAES().DecryptCBC(secret, ekey, paramMap['IV'])
-
- paramMap['FUNC_PWD'] = hardcodeCreateSecurePrintPassphrase
- paramMap['FUNC_KDF'] = hardcodeApplyKdf
- paramMap['FUNC_MASK'] = hardcodeMask
- paramMap['FUNC_UNMASK'] = hardcodeUnmask
- paramMap['FUNC_CHKPWD'] = hardcodeCheckPassphrase
- return paramMap
-
-
-################################################################################
-################################################################################
-class PyBtcWallet(object):
- """
- This class encapsulates all the concepts and variables in a "wallet",
- and maintains the passphrase protection, key stretching, encryption,
- etc, required to maintain the wallet. This class also includes the
- file I/O methods for storing and loading wallets.
-
- ***NOTE: I have ONLY implemented deterministic wallets, using ECDSA
- Diffie-Hellman shared-secret crypto operations. This allows
- one to actually determine the next PUBLIC KEY in the address
- chain without actually having access to the private keys.
- This makes it possible to synchronize online-offline computers
- once and never again.
-
- You can import random keys into your wallet, but if it is
- encrypted, you will have to supply a passphrase to make sure
- it can be encrypted as well.
-
- Presumably, wallets will be used for one of three purposes:
-
- (1) Spend money and receive payments
- (2) Watching-only wallets - have the private keys, just not on this computer
- (3) May be watching *other* people's addrs. There's a variety of reasons
- we might want to watch other peoples' addresses, but most them are not
- relevant to a "basic" BTC user. Nonetheless it should be supported to
- watch money without considering it part of our own assets
-
- This class is included in the combined-python-cpp module, because we really
- need to maintain a persistent Cpp.BtcWallet if this class is to be useful
- (we don't want to have to rescan the entire blockchain every time we do any
- wallet operations).
-
- The file format was designed from the outset with lots of unused space to
- allow for expansion without having to redefine the file format and break
- previous wallets. Luckily, wallet information is cheap, so we don't have
- to stress too much about saving space (100,000 addresses should take 15 MB)
-
- This file is NOT for storing Tx-related information. I want this file to
- be the minimal amount of information you need to secure and backup your
- entire wallet. Tx information can always be recovered from examining the
- blockchain... your private keys cannot be.
-
- We track version numbers, just in case. We start with 1.0
-
- Version 1.0:
- ---
- fileID -- (8) '\xbaWALLET\x00' for wallet files
- version -- (4) getVersionInt(PYBTCWALLET_VERSION)
- magic bytes -- (4) defines the blockchain for this wallet (BTC, NMC)
- wlt flags -- (8) 64 bits/flags representing info about wallet
- binUniqueID -- (6) first 5 bytes of first address in wallet
- (rootAddr25Bytes[:5][::-1]), reversed
- This is not intended to look like the root addr str
- and is reversed to avoid having all wallet IDs start
- with the same characters (since the network byte is front)
- create date -- (8) unix timestamp of when this wallet was created
- (actually, the earliest creation date of any addr
- in this wallet -- in the case of importing addr
- data). This is used to improve blockchain searching
- Short Name -- (32) Null-terminated user-supplied short name for wlt
- Long Name -- (256) Null-terminated user-supplied description for wlt
- Highest Used-- (8) The chain index of the highest used address
- ---
- Crypto/KDF -- (512) information identifying the types and parameters
- of encryption used to secure wallet, and key
- stretching used to secure your passphrase.
- Includes salt. (the breakdown of this field will
- be described separately)
- KeyGenerator-- (237) The base address for a determinstic wallet.
- Just a serialized PyBtcAddress object.
- ---
- UNUSED -- (1024) unused space for future expansion of wallet file
- ---
- Remainder of file is for key storage and various other things. Each
- "entry" will start with a 4-byte code identifying the entry type, then
- 20 bytes identifying what address the data is for, and finally then
- the subsequent data . So far, I have three types of entries that can
- be included:
-
- \x01 -- Address/Key data (as of PyBtcAddress version 1.0, 237 bytes)
- \x02 -- Address comments (variable-width field)
- \x03 -- Address comments (variable-width field)
- \x04 -- OP_EVAL subscript (when this is enabled, in the future)
-
- Please see PyBtcAddress for information on how key data is serialized.
- Comments (\x02) are var-width, and if a comment is changed to
- something longer than the existing one, we'll just blank out the old
- one and append a new one to the end of the file. It looks like
-
- 02000000 01 4f This comment is enabled (01) with 4f characters
-
-
- For file syncing, we protect against corrupted wallets by doing atomic
- operations before even telling the user that new data has been added.
- We do this by copying the wallet file, and creating a walletUpdateFailed
- file. We then modify the original, verify its integrity, and then delete
- the walletUpdateFailed file. Then we create a backupUpdateFailed flag,
- do the identical update on the backup file, and delete the failed flag.
- This guaranatees that no matter which nanosecond the power goes out,
- there will be an uncorrupted wallet and we know which one it is.
-
- We never let the user see any data until the atomic write-to-file operation
- has completed
-
-
- Additionally, we implement key locking and unlocking, with timeout. These
- key locking features are only DEFINED here, not actually enforced (because
- this is a library, not an application). You can set the default/temporary
- time that the KDF key is maintained in memory after the passphrase is
- entered, and this class will keep track of when the wallet should be next
- locked. It is up to the application to check whether the current time
- exceeds the lock time. This will probably be done in a kind of heartbeat
- method, which checks every few seconds for all sorts of things -- including
- wallet locking.
- """
-
- #############################################################################
- def __init__(self):
- self.fileTypeStr = '\xbaWALLET\x00'
- self.magicBytes = MAGIC_BYTES
- self.version = PYBTCWALLET_VERSION # (Major, Minor, Minor++, even-more-minor)
- self.eofByte = 0
- self.cppWallet = None # Mirror of PyBtcWallet in C++ object
- self.cppInfo = {} # Extra info about each address to help sync
- self.watchingOnly = False
- self.wltCreateDate = 0
-
- # Three dictionaries hold all data
- self.addrMap = {} # maps 20-byte addresses to PyBtcAddress objects
- self.commentsMap = {} # maps 20-byte addresses to user-created comments
- self.commentLocs = {} # map comment keys to wallet file locations
- self.opevalMap = {} # maps 20-byte addresses to OP_EVAL data (future)
- self.labelName = ''
- self.labelDescr = ''
- self.linearAddr160List = []
- self.chainIndexMap = {}
- self.txAddrMap = {} # cache for getting tx-labels based on addr search
- if USE_TESTNET:
- self.addrPoolSize = 10 # this makes debugging so much easier!
- else:
- self.addrPoolSize = CLI_OPTIONS.keypool
-
- # For file sync features
- self.walletPath = ''
- self.doBlockchainSync = BLOCKCHAIN_READONLY
- self.lastSyncBlockNum = 0
-
- # Private key encryption details
- self.useEncryption = False
- self.kdf = None
- self.crypto = None
- self.kdfKey = None
- self.defaultKeyLifetime = 10 # seconds after unlock, that key is discarded
- self.lockWalletAtTime = 0 # seconds after unlock, that key is discarded
- self.isLocked = False
- self.testedComputeTime=None
-
- # Deterministic wallet, need a root key. Though we can still import keys.
- # The unique ID contains the network byte (id[-1]) but is not intended to
- # resemble the address of the root key
- self.uniqueIDBin = ''
- self.uniqueIDB58 = '' # Base58 version of reversed-uniqueIDBin
- self.lastComputedChainAddr160 = ''
- self.lastComputedChainIndex = 0
- self.highestUsedChainIndex = 0
-
- # All PyBtcAddress serializations are exact same size, figure it out now
- self.pybtcaddrSize = len(PyBtcAddress().serialize())
-
-
- # All BDM calls by default go on the multi-thread-queue. But if the BDM
- # is the one calling the PyBtcWallet methods, it will deadlock if it uses
- # the queue. Therefore, the BDM will set this flag before making any
- # calls, which will tell PyBtcWallet to use __direct methods.
- self.calledFromBDM = False
-
- # Finally, a bunch of offsets that tell us where data is stored in the
- # file: this can be generated automatically on unpacking (meaning it
- # doesn't require manually updating offsets if I change the format), and
- # will save us a couple lines of code later, when we need to update things
- self.offsetWltFlags = -1
- self.offsetLabelName = -1
- self.offsetLabelDescr = -1
- self.offsetTopUsed = -1
- self.offsetRootAddr = -1
- self.offsetKdfParams = -1
- self.offsetCrypto = -1
-
- # These flags are ONLY for unit-testing the walletFileSafeUpdate function
- self.interruptTest1 = False
- self.interruptTest2 = False
- self.interruptTest3 = False
-
- #############################################################################
- def getWalletVersion(self):
- return (getVersionInt(self.version), getVersionString(self.version))
-
- #############################################################################
- def getWalletVersion(self):
- return (getVersionInt(self.version), getVersionString(self.version))
-
-
- #############################################################################
- def getWalletPath(self):
- return self.walletPath
-
- #############################################################################
- def getTimeRangeForAddress(self, addr160):
- if not self.addrMap.has_key(addr160):
- return None
- else:
- return self.addrMap[addr160].getTimeRange()
-
- #############################################################################
- def getBlockRangeForAddress(self, addr20):
- if not self.addrMap.has_key(addr160):
- return None
- else:
- return self.addrMap[addr160].getBlockRange()
-
- #############################################################################
- def setBlockchainSyncFlag(self, syncYes=True):
- self.doBlockchainSync = syncYes
-
- #############################################################################
- def syncWithBlockchain(self, startBlk=None):
- """
- Will block until getTopBlockHeader() returns, which could be a while.
- If you don't want to wait, check TheBDM.getBDMState()=='BlockchainReady'
- before calling this method. If you expect the blockchain will have to
- be rescanned, then call TheBDM.rescanBlockchain or TheBDM.loadBlockchain
-
- If this method is called from the BDM itself, calledFromBDM will signal
- to use the BDM methods directly, not the queue. This will deadlock
- otherwise.
- """
-
- TimerStart('syncWithBlockchain')
-
- if TheBDM.getBDMState() in ('Offline', 'Uninitialized'):
- LOGWARN('Called syncWithBlockchain but BDM is %s', TheBDM.getBDMState())
- return
-
- if not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
- if startBlk==None:
- startBlk = self.lastSyncBlockNum + 1
-
- # calledFromBDM means that ultimately the BDM itself called this
- # method and is blocking waiting for it. So we can't use the
- # BDM-thread queue, must call its methods directly
- if self.calledFromBDM:
- TheBDM.scanBlockchainForTx_bdm_direct(self.cppWallet, startBlk)
- self.lastSyncBlockNum = TheBDM.getTopBlockHeight_bdm_direct()
- else:
- TheBDM.scanBlockchainForTx(self.cppWallet, startBlk, wait=True)
- self.lastSyncBlockNum = TheBDM.getTopBlockHeight(wait=True)
- else:
- LOGERROR('Blockchain-sync requested, but current wallet')
- LOGERROR('is set to BLOCKCHAIN_DONOTUSE')
-
-
- TimerStop('syncWithBlockchain')
-
-
-
- #############################################################################
- def syncWithBlockchainLite(self, startBlk=None):
- """
- This is just like a regular sync, but it won't rescan the whole blockchain
- if the wallet is dirty -- if addresses were imported recently, it will
- still only scan what the blockchain picked up on the last scan. Use the
- non-lite version to allow a full scan.
- """
-
- TimerStart('syncWithBlockchain')
-
- if TheBDM.getBDMState() in ('Offline', 'Uninitialized'):
- LOGWARN('Called syncWithBlockchainLite but BDM is %s', TheBDM.getBDMState())
- return
-
- if not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
- if startBlk==None:
- startBlk = self.lastSyncBlockNum + 1
-
- # calledFromBDM means that ultimately the BDM itself called this
- # method and is blocking waiting for it. So we can't use the
- # BDM-thread queue, must call its methods directly
- if self.calledFromBDM:
- TheBDM.scanRegisteredTxForWallet_bdm_direct(self.cppWallet, startBlk)
- self.lastSyncBlockNum = TheBDM.getTopBlockHeight_bdm_direct()
- else:
- TheBDM.scanRegisteredTxForWallet(self.cppWallet, startBlk, wait=True)
- self.lastSyncBlockNum = TheBDM.getTopBlockHeight(wait=True)
- else:
- LOGERROR('Blockchain-sync requested, but current wallet')
- LOGERROR('is set to BLOCKCHAIN_DONOTUSE')
-
-
- TimerStop('syncWithBlockchain')
-
-
- #############################################################################
- def getCommentForAddrBookEntry(self, abe):
- comment = self.getComment(abe.getAddr160())
- if len(comment)>0:
- return comment
-
- # SWIG BUG!
- # http://sourceforge.net/tracker/?func=detail&atid=101645&aid=3403085&group_id=1645
- # Apparently, using the -threads option when compiling the swig module
- # causes the "for i in vector<...>:" mechanic to sometimes throw seg faults!
- # For this reason, this method was replaced with the one below:
- for regTx in abe.getTxList():
- comment = self.getComment(regTx.getTxHash())
- if len(comment)>0:
- return comment
-
- return ''
-
- #############################################################################
- def getCommentForTxList(self, a160, txhashList):
- comment = self.getComment(a160)
- if len(comment)>0:
- return comment
-
- for txHash in txhashList:
- comment = self.getComment(txHash)
- if len(comment)>0:
- return comment
-
- return ''
-
- #############################################################################
- def printAddressBook(self):
- addrbook = self.cppWallet.createAddressBook()
- for abe in addrbook:
- print hash160_to_addrStr(abe.getAddr160()),
- txlist = abe.getTxList()
- print len(txlist)
- for rtx in txlist:
- print '\t', binary_to_hex(rtx.getTxHash(), BIGENDIAN)
-
- #############################################################################
- def hasAnyImported(self):
- for a160,addr in self.addrMap.iteritems():
- if addr.chainIndex == -2:
- return True
- return False
-
-
- #############################################################################
- def getBalance(self, balType="Spendable"):
- if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
- return -1
- else:
- currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
- if balType.lower() in ('spendable','spend'):
- return self.cppWallet.getSpendableBalance(currBlk)
- elif balType.lower() in ('unconfirmed','unconf'):
- return self.cppWallet.getUnconfirmedBalance(currBlk)
- elif balType.lower() in ('total','ultimate','unspent','full'):
- return self.cppWallet.getFullBalance()
- else:
- raise TypeError, 'Unknown balance type! "' + balType + '"'
-
-
- #############################################################################
- def getAddrBalance(self, addr160, balType="Spendable", currBlk=UINT32_MAX):
- if (not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM) or \
- not self.hasAddr(addr160):
- return -1
- else:
- addr = self.cppWallet.getScrAddrObjByKey(Hash160ToScrAddr(addr160))
- if balType.lower() in ('spendable','spend'):
- return addr.getSpendableBalance(currBlk)
- elif balType.lower() in ('unconfirmed','unconf'):
- return addr.getUnconfirmedBalance(currBlk)
- elif balType.lower() in ('ultimate','unspent','full'):
- return addr.getFullBalance()
- else:
- raise TypeError, 'Unknown balance type!'
-
- #############################################################################
- def getTxLedger(self, ledgType='Full'):
- """
- Gets the ledger entries for the entire wallet, from C++/SWIG data structs
- """
- if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
- return []
- else:
- ledgBlkChain = self.cppWallet.getTxLedger()
- ledgZeroConf = self.cppWallet.getZeroConfLedger()
- if ledgType.lower() in ('full','all','ultimate'):
- ledg = []
- ledg.extend(ledgBlkChain)
- ledg.extend(ledgZeroConf)
- return ledg
- elif ledgType.lower() in ('blk', 'blkchain', 'blockchain'):
- return ledgBlkChain
- elif ledgType.lower() in ('zeroconf', 'zero'):
- return ledgZeroConf
- else:
- raise TypeError, 'Unknown ledger type! "' + ledgType + '"'
-
-
-
-
- #############################################################################
- def getAddrTxLedger(self, addr160, ledgType='Full'):
- """
- Gets the ledger entries for the entire wallet, from C++/SWIG data structs
- """
- if (not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM) or \
- not self.hasAddr(addr160):
- return []
- else:
- scrAddr = Hash160ToScrAddr(addr160)
- ledgBlkChain = self.cppWallet.getScrAddrObjByKey(scrAddr).getTxLedger()
- ledgZeroConf = self.cppWallet.getScrAddrObjByKey(scrAddr).getZeroConfLedger()
- if ledgType.lower() in ('full','all','ultimate'):
- ledg = []
- ledg.extend(ledgBlkChain)
- ledg.extend(ledgZeroConf)
- return ledg
- elif ledgType.lower() in ('blk', 'blkchain', 'blockchain'):
- return ledgBlkChain
- elif ledgType.lower() in ('zeroconf', 'zero'):
- return ledgZeroConf
- else:
- raise TypeError, 'Unknown balance type! "' + ledgType + '"'
-
-
- #############################################################################
- def getTxOutList(self, txType='Spendable'):
- """ Returns UnspentTxOut/C++ objects """
- if TheBDM.getBDMState()=='BlockchainReady' and \
- not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
-
- currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
- self.syncWithBlockchain()
- if txType.lower() in ('spend', 'spendable'):
- return self.cppWallet.getSpendableTxOutList(currBlk);
- elif txType.lower() in ('full', 'all', 'unspent', 'ultimate'):
- return self.cppWallet.getFullTxOutList(currBlk);
- else:
- raise TypeError, 'Unknown balance type! ' + txType
- else:
- LOGERROR('***Blockchain is not available for accessing wallet-tx data')
- return []
-
- #############################################################################
- def getAddrTxOutList(self, addr160, txType='Spendable'):
- """ Returns UnspentTxOut/C++ objects """
- if TheBDM.getBDMState()=='BlockchainReady' and \
- self.hasAddr(addr160) and \
- not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
-
- currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
- self.syncWithBlockchain()
- scrAddrStr = Hash160ToScrAddr(addr160)
- cppAddr = self.cppWallet.getScrAddrObjByKey(scrAddrStr)
- if txType.lower() in ('spend', 'spendable'):
- return cppAddr.getSpendableTxOutList(currBlk);
- elif txType.lower() in ('full', 'all', 'unspent', 'ultimate'):
- return cppAddr.getFullTxOutList(currBlk);
- else:
- raise TypeError, 'Unknown TxOutList type! ' + txType
- else:
- LOGERROR('***Blockchain is not available for accessing wallet-tx data')
- return []
-
-
- #############################################################################
- def getAddrByHash160(self, addr160):
- return (None if not self.hasAddr(addr160) else self.addrMap[addr160])
-
- #############################################################################
- def hasAddr(self, addrData):
- if isinstance(addrData, str):
- if len(addrData) == 20:
- return self.addrMap.has_key(addrData)
- elif isLikelyDataType(addrData)==DATATYPE.Base58:
- return self.addrMap.has_key(addrStr_to_hash160(addrData))
- else:
- return False
- elif isinstance(addrData, PyBtcAddress):
- return self.addrMap.has_key(addrData.getAddr160())
- else:
- return False
-
-
- #############################################################################
- def setDefaultKeyLifetime(self, newlifetime):
- """ Set a new default lifetime for holding the unlock key. Min 2 sec """
- self.defaultKeyLifetime = max(newlifetime, 2)
-
- #############################################################################
- def checkWalletLockTimeout(self):
- if not self.isLocked and self.kdfKey and RightNow()>self.lockWalletAtTime:
- self.lock()
- if self.kdfKey:
- self.kdfKey.destroy()
- self.kdfKey = None
-
- if self.useEncryption:
- self.isLocked = True
-
-
-
- #############################################################################
- def lockTxOutsOnNewTx(self, pytxObj):
- for txin in pytxObj.inputs:
- self.cppWallet.lockTxOutSwig(txin.outpoint.txHash, txin.outpoint.txOutIndex)
-
-
-
- #############################################################################
- def setDefaultKeyLifetime(self, lifetimeInSec):
- """
- This is used to set (in memory only) the default time to keep the encrypt
- key in memory after the encryption passphrase has been entered. This is
- NOT enforced by PyBtcWallet, but the unlock method will use it to calc a
- unix timestamp when the wallet SHOULD be locked, and the external program
- can use that to decide when to call the lock method.
- """
- self.defaultKeyLifetime = lifetimeInSec
-
-
- #############################################################################
- # THIS WAS CREATED ORIGINALLY TO SUPPORT BITSAFE INTEGRATION INTO ARMORY
- # But it's also a good first step into general BIP 32 support
- def getChildExtPubFromRoot(self, i):
- root = self.addrMap['ROOT']
- ekey = ExtendedKey().CreateFromPublic(root.binPublicKey65, root.chaincode)
- newKey = HDWalletCrypto().ChildKeyDeriv(ekey, i)
- newKey.setIndex(i)
- return newKey
- #newAddr = PyBtcAddress().createFromExtendedPublicKey(newKey)
-
- #############################################################################
- #def createFromExtendedPublicKey(self, ekey):
- #pub65 = ekey.getPub()
- #chain = ekey.getChain()
- #newAddr = self.createFromPublicKeyData(pub65, chain)
- #newAddr.chainIndex = newAddr.getIndex()
- #return newAddr
-
- #############################################################################
- #def deriveChildPublicKey(self, i):
- #newKey = HDWalletCrypto().ChildKeyDeriv(self.getExtendedPublicKey(), i)
- #newAddr = PyBtcAddress().createFromExtendedPublicKey(newKey)
-
-
- #############################################################################
- # THIS WAS CREATED ORIGINALLY TO SUPPORT BITSAFE INTEGRATION INTO ARMORY
- # But it's also a good first step into general BIP 32 support
- def createWalletFromMasterPubKey(self, masterHex, \
- isActuallyNew=True, \
- doRegisterWithBDM=True):
- # This function eats hex inputs, not sure why I chose to do that...
- p0 = masterHex.index('4104') + 2
- pubkey = SecureBinaryData(hex_to_binary(masterHex[p0:p0+130]))
- c0 = masterHex.index('1220') + 4
- chain = SecureBinaryData(hex_to_binary(masterHex[c0:c0+64]))
-
- # Create the root address object
- rootAddr = PyBtcAddress().createFromPublicKeyData( pubkey )
- rootAddr.markAsRootAddr(chain)
- self.addrMap['ROOT'] = rootAddr
-
- ekey = self.getChildExtPubFromRoot(0)
- firstAddr = PyBtcAddress().createFromPublicKeyData(ekey.getPub())
- firstAddr.chaincode = ekey.getChain()
- firstAddr.chainIndex = 0
- first160 = firstAddr.getAddr160()
-
- # Update wallet object with the new data
- # NEW IN WALLET VERSION 1.35: unique ID is now based on
- # the first chained address: this guarantees that the unique ID
- # is based not only on the private key, BUT ALSO THE CHAIN CODE
- self.useEncryption = False
- self.addrMap[firstAddr.getAddr160()] = firstAddr
- self.uniqueIDBin = (ADDRBYTE + firstAddr.getAddr160()[:5])[::-1]
- self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
- self.labelName = 'BitSafe Demo Wallet'
- self.labelDescr = 'We\'ll be lucky if this works!'
- self.lastComputedChainAddr160 = first160
- self.lastComputedChainIndex = firstAddr.chainIndex
- self.highestUsedChainIndex = firstAddr.chainIndex-1
- self.wltCreateDate = long(RightNow())
- self.linearAddr160List = [first160]
- self.chainIndexMap[firstAddr.chainIndex] = first160
- self.watchingOnly = True
-
- # We don't have to worry about atomic file operations when
- # creating the wallet: so we just do it naively here.
- newWalletFilePath = os.path.join(ARMORY_HOME_DIR, 'bitsafe_demo_%s.wallet' % self.uniqueIDB58)
- self.walletPath = newWalletFilePath
- if not newWalletFilePath:
- shortName = self.labelName .replace(' ','_')
- # This was really only needed when we were putting name in filename
- #for c in ',?;:\'"?/\\=+-|[]{}<>':
- #shortName = shortName.replace(c,'_')
- newName = 'armory_%s_.wallet' % self.uniqueIDB58
- self.walletPath = os.path.join(ARMORY_HOME_DIR, newName)
-
- LOGINFO(' New wallet will be written to: %s', self.walletPath)
- newfile = open(self.walletPath, 'wb')
- fileData = BinaryPacker()
-
- # packHeader method writes KDF params and root address
- headerBytes = self.packHeader(fileData)
-
- # We make sure we have byte locations of the two addresses, to start
- self.addrMap[first160].walletByteLoc = headerBytes + 21
-
- fileData.put(BINARY_CHUNK, '\x00' + first160 + firstAddr.serialize())
-
-
- # Store the current localtime and blocknumber. Block number is always
- # accurate if available, but time may not be exactly right. Whenever
- # basing anything on time, please assume that it is up to one day off!
- time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
-
- # Don't forget to sync the C++ wallet object
- self.cppWallet = Cpp.BtcWallet()
- self.cppWallet.addAddress_5_(rootAddr.getAddr160(), time0,blk0,time0,blk0)
- self.cppWallet.addAddress_5_(first160, time0,blk0,time0,blk0)
-
- # We might be holding the wallet temporarily and not ready to register it
- if doRegisterWithBDM:
- TheBDM.registerWallet(self.cppWallet, isFresh=isActuallyNew) # new wallet
-
- newfile.write(fileData.getBinaryString())
- newfile.close()
-
- walletFileBackup = self.getWalletPath('backup')
- shutil.copy(self.walletPath, walletFileBackup)
-
-
- # Let's fill the address pool while we are unlocked
- # It will get a lot more expensive if we do it on the next unlock
- if doRegisterWithBDM:
- self.fillAddressPool(self.addrPoolSize, isActuallyNew=isActuallyNew)
-
- return self
-
-
-
-
- #############################################################################
- def createNewWallet(self, newWalletFilePath=None, \
- plainRootKey=None, chaincode=None, \
- withEncrypt=True, IV=None, securePassphrase=None, \
- kdfTargSec=DEFAULT_COMPUTE_TIME_TARGET, \
- kdfMaxMem=DEFAULT_MAXMEM_LIMIT, \
- shortLabel='', longLabel='', isActuallyNew=True, \
- doRegisterWithBDM=True, skipBackupFile=False):
- """
- This method will create a new wallet, using as much customizability
- as you want. You can enable encryption, and set the target params
- of the key-derivation function (compute-time and max memory usage).
- The KDF parameters will be experimentally determined to be as hard
- as possible for your computer within the specified time target
- (default, 0.25s). It will aim for maximizing memory usage and using
- only 1 or 2 iterations of it, but this can be changed by scaling
- down the kdfMaxMem parameter (default 32 MB).
-
- If you use encryption, don't forget to supply a 32-byte passphrase,
- created via SecureBinaryData(pythonStr). This method will apply
- the passphrase so that the wallet is "born" encrypted.
-
- The field plainRootKey could be used to recover a written backup
- of a wallet, since all addresses are deterministically computed
- from the root address. This obviously won't reocver any imported
- keys, but does mean that you can recover your ENTIRE WALLET from
- only those 32 plaintext bytes AND the 32-byte chaincode.
-
- We skip the atomic file operations since we don't even have
- a wallet file yet to safely update.
-
- DO NOT CALL THIS FROM BDM METHOD. IT MAY DEADLOCK.
- """
-
-
- if self.calledFromBDM:
- LOGERROR('Called createNewWallet() from BDM method!')
- LOGERROR('Don\'t do this!')
- return None
-
- if securePassphrase:
- securePassphrase = SecureBinaryData(securePassphrase)
- if plainRootKey:
- plainRootKey = SecureBinaryData(plainRootKey)
- if chaincode:
- chaincode = SecureBinaryData(chaincode)
-
- if withEncrypt and not securePassphrase:
- raise EncryptionError, 'Cannot create encrypted wallet without passphrase'
-
- LOGINFO('***Creating new deterministic wallet')
-
- # Set up the KDF
- if not withEncrypt:
- self.kdfKey = None
- else:
- LOGINFO('(with encryption)')
- self.kdf = KdfRomix()
- LOGINFO('Target (time,RAM)=(%0.3f,%d)', kdfTargSec, kdfMaxMem)
- (mem,niter,salt) = self.computeSystemSpecificKdfParams( \
- kdfTargSec, kdfMaxMem)
- self.kdf.usePrecomputedKdfParams(mem, niter, salt)
- self.kdfKey = self.kdf.DeriveKey(securePassphrase)
-
- if not plainRootKey:
- # TODO: We should find a source for injecting extra entropy
- # At least, Crypto++ grabs from a few different sources, itself
- plainRootKey = SecureBinaryData().GenerateRandom(32)
-
- if not chaincode:
- #chaincode = SecureBinaryData().GenerateRandom(32)
- # For wallet 1.35a, derive chaincode deterministically from root key
- # The root key already has 256 bits of entropy which is excessive,
- # anyway. And my original reason for having the chaincode random is
- # no longer valid.
- chaincode = DeriveChaincodeFromRootKey(plainRootKey)
-
-
-
- # Create the root address object
- rootAddr = PyBtcAddress().createFromPlainKeyData( \
- plainRootKey, \
- IV16=IV, \
- willBeEncr=withEncrypt, \
- generateIVIfNecessary=True)
- rootAddr.markAsRootAddr(chaincode)
-
- # This does nothing if no encryption
- rootAddr.lock(self.kdfKey)
- rootAddr.unlock(self.kdfKey)
-
- firstAddr = rootAddr.extendAddressChain(self.kdfKey)
- first160 = firstAddr.getAddr160()
-
- # Update wallet object with the new data
- # NEW IN WALLET VERSION 1.35: unique ID is now based on
- # the first chained address: this guarantees that the unique ID
- # is based not only on the private key, BUT ALSO THE CHAIN CODE
- self.useEncryption = withEncrypt
- self.addrMap['ROOT'] = rootAddr
- self.addrMap[firstAddr.getAddr160()] = firstAddr
- self.uniqueIDBin = (ADDRBYTE + firstAddr.getAddr160()[:5])[::-1]
- self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
- self.labelName = shortLabel[:32]
- self.labelDescr = longLabel[:256]
- self.lastComputedChainAddr160 = first160
- self.lastComputedChainIndex = firstAddr.chainIndex
- self.highestUsedChainIndex = firstAddr.chainIndex-1
- self.wltCreateDate = long(RightNow())
- self.linearAddr160List = [first160]
- self.chainIndexMap[firstAddr.chainIndex] = first160
-
- # We don't have to worry about atomic file operations when
- # creating the wallet: so we just do it naively here.
- self.walletPath = newWalletFilePath
- if not newWalletFilePath:
- shortName = self.labelName .replace(' ','_')
- # This was really only needed when we were putting name in filename
- #for c in ',?;:\'"?/\\=+-|[]{}<>':
- #shortName = shortName.replace(c,'_')
- newName = 'armory_%s_.wallet' % self.uniqueIDB58
- self.walletPath = os.path.join(ARMORY_HOME_DIR, newName)
-
- LOGINFO(' New wallet will be written to: %s', self.walletPath)
- newfile = open(self.walletPath, 'wb')
- fileData = BinaryPacker()
-
- # packHeader method writes KDF params and root address
- headerBytes = self.packHeader(fileData)
-
- # We make sure we have byte locations of the two addresses, to start
- self.addrMap[first160].walletByteLoc = headerBytes + 21
-
- fileData.put(BINARY_CHUNK, '\x00' + first160 + firstAddr.serialize())
-
-
- # Store the current localtime and blocknumber. Block number is always
- # accurate if available, but time may not be exactly right. Whenever
- # basing anything on time, please assume that it is up to one day off!
- time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
-
- # Don't forget to sync the C++ wallet object
- self.cppWallet = Cpp.BtcWallet()
- self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(rootAddr.getAddr160()), \
- time0,blk0,time0,blk0)
- self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(first160), \
- time0,blk0,time0,blk0)
-
- # We might be holding the wallet temporarily and not ready to register it
- if doRegisterWithBDM:
- TheBDM.registerWallet(self.cppWallet, isFresh=isActuallyNew) # new wallet
-
-
- newfile.write(fileData.getBinaryString())
- newfile.close()
-
- if not skipBackupFile:
- walletFileBackup = self.getWalletPath('backup')
- shutil.copy(self.walletPath, walletFileBackup)
-
- # Lock/unlock to make sure encrypted keys are computed and written to file
- if self.useEncryption:
- self.unlock(secureKdfOutput=self.kdfKey)
-
- # Let's fill the address pool while we are unlocked
- # It will get a lot more expensive if we do it on the next unlock
- if doRegisterWithBDM:
- self.fillAddressPool(self.addrPoolSize, isActuallyNew=isActuallyNew)
-
- if self.useEncryption:
- self.lock()
- return self
-
-
-
-
-
- #############################################################################
- def advanceHighestIndex(self, ct=1):
- topIndex = self.highestUsedChainIndex + ct
- topIndex = min(topIndex, self.lastComputedChainIndex)
- topIndex = max(topIndex, 0)
-
- self.highestUsedChainIndex = topIndex
- self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, self.offsetTopUsed, \
- int_to_binary(self.highestUsedChainIndex, widthBytes=8)]])
- self.fillAddressPool()
-
- #############################################################################
- def rewindHighestIndex(self, ct=1):
- self.advanceHighestIndex(-ct)
-
-
- #############################################################################
- def peekNextUnusedAddr160(self):
- try:
- return self.getAddress160ByChainIndex(self.highestUsedChainIndex+1)
- except:
- # Not sure why we'd fail, maybe addrPoolSize==0?
- return ''
-
- #############################################################################
- def getNextUnusedAddress(self):
- if self.lastComputedChainIndex - self.highestUsedChainIndex < \
- max(self.addrPoolSize-1,1):
- self.fillAddressPool(self.addrPoolSize)
-
- self.advanceHighestIndex(1)
- new160 = self.getAddress160ByChainIndex(self.highestUsedChainIndex)
- self.addrMap[new160].touch()
- self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, \
- self.addrMap[new160].walletByteLoc, \
- self.addrMap[new160].serialize()]] )
- return self.addrMap[new160]
-
-
- #############################################################################
- def computeNextAddress(self, addr160=None, isActuallyNew=True, doRegister=True):
- """
- Use this to extend the chain beyond the last-computed address.
-
- We will usually be computing the next address from the tip of the
- chain, but I suppose someone messing with the file format may
- leave gaps in the chain requiring some to be generated in the middle
- (then we can use the addr160 arg to specify which address to extend)
- """
- if not addr160:
- addr160 = self.lastComputedChainAddr160
-
- newAddr = self.addrMap[addr160].extendAddressChain(self.kdfKey)
- new160 = newAddr.getAddr160()
- newDataLoc = self.walletFileSafeUpdate( \
- [[WLT_UPDATE_ADD, WLT_DATATYPE_KEYDATA, new160, newAddr]])
- self.addrMap[new160] = newAddr
- self.addrMap[new160].walletByteLoc = newDataLoc[0] + 21
-
- if newAddr.chainIndex > self.lastComputedChainIndex:
- self.lastComputedChainAddr160 = new160
- self.lastComputedChainIndex = newAddr.chainIndex
-
- self.linearAddr160List.append(new160)
- self.chainIndexMap[newAddr.chainIndex] = new160
-
- # In the future we will enable first/last seen, but not yet
- time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
- self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(new160), \
- time0,blk0,time0,blk0)
-
- # For recovery rescans, this method will be called directly by
- # the BDM, which may cause a deadlock if we go through the
- # thread queue. The calledFromBDM is "permission" to access the
- # BDM private methods directly
- if doRegister:
- if self.calledFromBDM:
- TheBDM.registerScrAddr_bdm_direct(Hash160ToScrAddr(new160), timeInfo=isActuallyNew)
- else:
- # This uses the thread queue, which means the address will be
- # registered next time the BDM is not busy
- TheBDM.registerScrAddr(Hash160ToScrAddr(new160), isFresh=isActuallyNew)
-
- return new160
-
-
-
-
- #############################################################################
- def fillAddressPool(self, numPool=None, isActuallyNew=True, doRegister=True):
- """
- Usually, when we fill the address pool, we are generating addresses
- for the first time, and thus there is no chance it's ever seen the
- blockchain. However, this method is also used for recovery/import
- of wallets, where the address pool has addresses that probably have
- transactions already in the blockchain.
- """
- if not numPool:
- numPool = self.addrPoolSize
-
- gap = self.lastComputedChainIndex - self.highestUsedChainIndex
- numToCreate = max(numPool - gap, 0)
- for i in range(numToCreate):
- self.computeNextAddress(isActuallyNew=isActuallyNew, doRegister=doRegister)
- return self.lastComputedChainIndex
-
- #############################################################################
- def setAddrPoolSize(self, newSize):
- if newSize<5:
- LOGERROR('Will not allow address pool sizes smaller than 5...')
- return
-
- self.addrPoolSize = newSize
- self.fillAddressPool(newSize)
-
-
- #############################################################################
- def getHighestUsedIndex(self):
- """
- This only retrieves the stored value, but it may not be correct if,
- for instance, the wallet was just imported but has been used before.
- """
- return self.highestUsedChainIndex
-
-
- #############################################################################
- def getHighestComputedIndex(self):
- """
- This only retrieves the stored value, but it may not be correct if,
- for instance, the wallet was just imported but has been used before.
- """
- return self.lastComputedChainIndex
-
-
-
- #############################################################################
- def detectHighestUsedIndex(self, writeResultToWallet=False, fullscan=False):
- """
- This method is used to find the highestUsedChainIndex value of the
- wallet WITHIN its address pool. It will NOT extend its address pool
- in this search, because it is assumed that the wallet couldn't have
- used any addresses it had not calculated yet.
-
- If you have a wallet IMPORT, though, or a wallet that has been used
- before but does not have this information stored with it, then you
- should be using the next method:
-
- self.freshImportFindHighestIndex()
-
- which will actually extend the address pool as necessary to find the
- highest address used.
- """
- if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
- LOGERROR('Cannot detect any usage information without the blockchain')
- return -1
-
- oldSync = self.doBlockchainSync
- self.doBlockchainSync = BLOCKCHAIN_READONLY
- if fullscan:
- # Will initiate rescan if wallet is dirty
- self.syncWithBlockchain(0)
- else:
- # Will only use data already scanned, even if wallet is dirty
- self.syncWithBlockchainLite(0)
- self.doBlockchainSync = oldSync
-
- highestIndex = max(self.highestUsedChainIndex, 0)
- for addr in self.getLinearAddrList(withAddrPool=True):
- a160 = addr.getAddr160()
- if len(self.getAddrTxLedger(a160)) > 0:
- highestIndex = max(highestIndex, addr.chainIndex)
-
- if writeResultToWallet:
- self.highestUsedChainIndex = highestIndex
- self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, self.offsetTopUsed, \
- int_to_binary(highestIndex, widthBytes=8)]])
-
-
- return highestIndex
-
-
-
-
- #############################################################################
- def freshImportFindHighestIndex(self, stepSize=None):
- """
- This is much like detectHighestUsedIndex, except this will extend the
- address pool as necessary. It assumes that you have a fresh wallet
- that has been used before, but was deleted and restored from its root
- key and chaincode, and thus we don't know if only 10 or 10,000 addresses
- were used.
-
- If this was an exceptionally active wallet, it's possible that we
- may need to manually increase the step size to be sure we find
- everything. In fact, there is no way to tell FOR SURE what is the
- last addressed used: one must make an assumption that the wallet
- never calculated more than X addresses without receiving a payment...
- """
- if not stepSize:
- stepSize = self.addrPoolSize
-
- topCompute = 0
- topUsed = 0
- oldPoolSize = self.addrPoolSize
- self.addrPoolSize = stepSize
- # When we hit the highest address, the topCompute value will extend
- # out [stepsize] addresses beyond topUsed, and the topUsed will not
- # change, thus escaping the while loop
- nWhile = 0
- while topCompute - topUsed < 0.9*stepSize:
- topCompute = self.fillAddressPool(stepSize, isActuallyNew=False)
- topUsed = self.detectHighestUsedIndex(True)
- nWhile += 1
- if nWhile>10000:
- raise WalletAddressError, 'Escaping inf loop in freshImport...'
-
-
- self.addrPoolSize = oldPoolSize
- return topUsed
-
-
- #############################################################################
- def writeFreshWalletFile(self, path, newName='', newDescr=''):
- newFile = open(path, 'wb')
- bp = BinaryPacker()
- self.packHeader(bp)
- newFile.write(bp.getBinaryString())
-
- for addr160,addrObj in self.addrMap.iteritems():
- if not addr160=='ROOT':
- newFile.write('\x00' + addr160 + addrObj.serialize())
-
- for hashVal,comment in self.commentsMap.iteritems():
- twoByteLength = int_to_binary(len(comment), widthBytes=2)
- if len(hashVal)==20:
- typestr = int_to_binary(WLT_DATATYPE_ADDRCOMMENT)
- newFile.write(typestr + hashVal + twoByteLength + comment)
- elif len(hashVal)==32:
- typestr = int_to_binary(WLT_DATATYPE_TXCOMMENT)
- newFile.write(typestr + hashVal + twoByteLength + comment)
-
- newFile.close()
-
-
- #############################################################################
- def makeUnencryptedWalletCopy(self, newPath, securePassphrase=None):
-
- self.writeFreshWalletFile(newPath)
- if not self.useEncryption:
- return True
-
- if self.isLocked:
- if not securePassphrase:
- LOGERROR('Attempted to make unencrypted copy without unlocking')
- return False
- else:
- self.unlock(securePassphrase=SecureBinaryData(securePassphrase))
-
- newWlt = PyBtcWallet().readWalletFile(newPath)
- newWlt.unlock(self.kdfKey)
- newWlt.changeWalletEncryption(None)
-
-
- walletFileBackup = newWlt.getWalletPath('backup')
- if os.path.exists(walletFileBackup):
- LOGINFO('New wallet created, deleting backup file')
- os.remove(walletFileBackup)
- return True
-
-
- #############################################################################
- def makeEncryptedWalletCopy(self, newPath, securePassphrase=None):
- """
- Unlike the previous method, I can't just copy it if it's unencrypted,
- because the target device probably shouldn't be exposed to the
- unencrypted wallet. So for that case, we will encrypt the wallet
- in place, copy, then remove the encryption.
- """
-
- if self.useEncryption:
- # Encrypted->Encrypted: Easy!
- self.writeFreshWalletFile(newPath)
- return True
-
- if not securePassphrase:
- LOGERROR("Tried to make encrypted copy, but no passphrase supplied")
- return False
-
- # If we're starting unencrypted...encrypt it in place
- (mem,nIter,salt) = self.computeSystemSpecificKdfParams(0.25)
- self.changeKdfParams(mem, nIter, salt)
- self.changeWalletEncryption(securePassphrase=securePassphrase)
-
- # Write the encrypted wallet to the target directory
- self.writeFreshWalletFile(newPath)
-
- # Unencrypt the wallet now
- self.unlock(securePassphrase=securePassphrase)
- self.changeWalletEncryption(None)
- return True
-
-
-
-
-
- #############################################################################
- def forkOnlineWallet(self, newWalletFile, shortLabel='', longLabel=''):
- """
- Make a copy of this wallet that contains no private key data
- """
- if not self.addrMap['ROOT'].hasPrivKey():
- LOGWARN('This wallet is already void of any private key data!')
- LOGWARN('Aborting wallet fork operation.')
-
- onlineWallet = PyBtcWallet()
- onlineWallet.fileTypeStr = self.fileTypeStr
- onlineWallet.version = self.version
- onlineWallet.magicBytes = self.magicBytes
- onlineWallet.wltCreateDate = self.wltCreateDate
- onlineWallet.useEncryption = False
- onlineWallet.watchingOnly = True
-
- if not shortLabel:
- shortLabel = self.labelName
- if not longLabel:
- longLabel = self.labelDescr
-
- onlineWallet.labelName = (shortLabel + ' (Watch)')[:32]
- onlineWallet.labelDescr = (longLabel + ' (Watching-only copy)')[:256]
-
- newAddrMap = {}
- for addr160,addrObj in self.addrMap.iteritems():
- onlineWallet.addrMap[addr160] = addrObj.copy()
- onlineWallet.addrMap[addr160].binPrivKey32_Encr = SecureBinaryData()
- onlineWallet.addrMap[addr160].binPrivKey32_Plain = SecureBinaryData()
- onlineWallet.addrMap[addr160].useEncryption = False
- onlineWallet.addrMap[addr160].createPrivKeyNextUnlock = False
-
- onlineWallet.commentsMap = self.commentsMap
- onlineWallet.opevalMap = self.opevalMap
-
- onlineWallet.uniqueIDBin = self.uniqueIDBin
- onlineWallet.highestUsedChainIndex = self.highestUsedChainIndex
- onlineWallet.lastComputedChainAddr160 = self.lastComputedChainAddr160
- onlineWallet.lastComputedChainIndex = self.lastComputedChainIndex
-
- onlineWallet.writeFreshWalletFile(newWalletFile, shortLabel, longLabel)
- return onlineWallet
-
-
- #############################################################################
- def supplyRootKeyForWatchingOnlyWallet(self, securePlainRootKey32, \
- permanent=False):
- """
- If you have a watching only wallet, you might want to upgrade it to a
- full wallet by supplying the 32-byte root private key. Generally, this
- will be used to make a 'permanent' upgrade to your wallet, and the new
- keys will be written to file ( NOTE: you should setup encryption just
- after doing this, to make sure that the plaintext keys get wiped from
- your wallet file).
-
- On the other hand, if you don't want this to be a permanent upgrade,
- this could potentially be used to maintain a watching only wallet on your
- harddrive, and actually plug in your plaintext root key instead of an
- encryption password whenever you want sign transactions.
- """
- pass
-
-
- #############################################################################
- def touchAddress(self, addr20):
- """
- Use this to update your wallet file to recognize the first/last times
- seen for the address. This information will improve blockchain search
- speed, if it knows not to search transactions that happened before they
- were created.
- """
- pass
-
- #############################################################################
- def testKdfComputeTime(self):
- """
- Experimentally determines the compute time required by this computer
- to execute with the current key-derivation parameters. This may be
- useful for when you transfer a wallet to a new computer that has
- different speed/memory characteristic.
- """
- testPassphrase = SecureBinaryData('This is a simple passphrase')
- start = RightNow()
- self.kdf.DeriveKey(testPassphrase)
- self.testedComputeTime = (RightNow()-start)
- return self.testedComputeTime
-
- #############################################################################
- def serializeKdfParams(self, kdfObj=None, binWidth=256):
- """
- Pack key-derivation function parameters into a binary stream.
- As of wallet version 1.0, there is only one KDF technique used
- in these wallets, and thus we only need to store the parameters
- of this KDF. In the future, we may have multiple KDFs and have
- to store the selection in this serialization.
- """
- if not kdfObj:
- kdfObj = self.kdf
-
- if not kdfObj:
- return '\x00'*binWidth
-
- binPacker = BinaryPacker()
- binPacker.put(UINT64, kdfObj.getMemoryReqtBytes())
- binPacker.put(UINT32, kdfObj.getNumIterations())
- binPacker.put(BINARY_CHUNK, kdfObj.getSalt().toBinStr(), width=32)
-
- kdfStr = binPacker.getBinaryString()
- binPacker.put(BINARY_CHUNK, computeChecksum(kdfStr,4), width=4)
- padSize = binWidth - binPacker.getSize()
- binPacker.put(BINARY_CHUNK, '\x00'*padSize)
-
- return binPacker.getBinaryString()
-
-
-
- #############################################################################
- def unserializeKdfParams(self, toUnpack, binWidth=256):
-
- if isinstance(toUnpack, BinaryUnpacker):
- binUnpacker = toUnpack
- else:
- binUnpacker = BinaryUnpacker(toUnpack)
-
-
-
- allKdfData = binUnpacker.get(BINARY_CHUNK, 44)
- kdfChksum = binUnpacker.get(BINARY_CHUNK, 4)
- kdfBytes = len(allKdfData) + len(kdfChksum)
- padding = binUnpacker.get(BINARY_CHUNK, binWidth-kdfBytes)
-
- if allKdfData=='\x00'*44:
- return None
-
- fixedKdfData = verifyChecksum(allKdfData, kdfChksum)
- if len(fixedKdfData)==0:
- raise UnserializeError, 'Corrupted KDF params, could not fix'
- elif not fixedKdfData==allKdfData:
- self.walletFileSafeUpdate( \
- [[WLT_UPDATE_MODIFY, self.offsetKdfParams, fixedKdfData]])
- allKdfData = fixedKdfData
- LOGWARN('KDF params in wallet were corrupted, but fixed')
-
- kdfUnpacker = BinaryUnpacker(allKdfData)
- mem = kdfUnpacker.get(UINT64)
- nIter = kdfUnpacker.get(UINT32)
- salt = kdfUnpacker.get(BINARY_CHUNK, 32)
-
- kdf = KdfRomix(mem, nIter, SecureBinaryData(salt))
- return kdf
-
-
- #############################################################################
- def serializeCryptoParams(self, binWidth=256):
- """
- As of wallet version 1.0, all wallets use the exact same encryption types,
- so there is nothing to serialize or unserialize. The 256 bytes here may
- be used in the future, though.
- """
- return '\x00'*binWidth
-
- #############################################################################
- def unserializeCryptoParams(self, toUnpack, binWidth=256):
- """
- As of wallet version 1.0, all wallets use the exact same encryption types,
- so there is nothing to serialize or unserialize. The 256 bytes here may
- be used in the future, though.
- """
- if isinstance(toUnpack, BinaryUnpacker):
- binUnpacker = toUnpack
- else:
- binUnpacker = BinaryUnpacker(toUnpack)
-
- binUnpacker.get(BINARY_CHUNK, binWidth)
- return CryptoAES()
-
- #############################################################################
- def verifyPassphrase(self, securePassphrase):
- """
- Verify a user-submitted passphrase. This passphrase goes into
- the key-derivation function to get actual encryption key, which
- is what actually needs to be verified
-
- Since all addresses should have the same encryption, we only need
- to verify correctness on the root key
- """
- kdfOutput = self.kdf.DeriveKey(securePassphrase)
- try:
- isValid = self.addrMap['ROOT'].verifyEncryptionKey(kdfOutput)
- return isValid
- finally:
- kdfOutput.destroy()
-
-
- #############################################################################
- def verifyEncryptionKey(self, secureKdfOutput):
- """
- Verify the underlying encryption key (from KDF).
- Since all addresses should have the same encryption,
- we only need to verify correctness on the root key.
- """
- return self.addrMap['ROOT'].verifyEncryptionKey(secureKdfOutput)
-
-
- #############################################################################
- def computeSystemSpecificKdfParams(self, targetSec=0.25, maxMem=32*1024*1024):
- """
- WARNING!!! DO NOT CHANGE KDF PARAMS AFTER ALREADY ENCRYPTED THE WALLET
- By changing them on an already-encrypted wallet, we are going
- to lose the original AES256-encryption keys -- which are
- uniquely determined by (numIter, memReqt, salt, passphrase)
-
- Only use this method before you have encrypted your wallet,
- in order to determine good KDF parameters based on your
- computer's specific speed/memory capabilities.
- """
- kdf = KdfRomix()
- kdf.computeKdfParams(targetSec, long(maxMem))
-
- mem = kdf.getMemoryReqtBytes()
- nIter = kdf.getNumIterations()
- salt = SecureBinaryData(kdf.getSalt().toBinStr())
- return (mem, nIter, salt)
-
- #############################################################################
- def restoreKdfParams(self, mem, numIter, secureSalt):
- """
- This method should only be used when we are loading an encrypted wallet
- from file. DO NOT USE THIS TO CHANGE KDF PARAMETERS. Doing so may
- result in data loss!
- """
- self.kdf = KdfRomix(mem, numIter, secureSalt)
-
-
- #############################################################################
- def changeKdfParams(self, mem, numIter, salt, securePassphrase=None):
- """
- Changing KDF changes the wallet encryption key which means that a KDF
- change is essentially the same as an encryption key change. As such,
- the wallet must be unlocked if you intend to change an already-
- encrypted wallet with KDF.
-
- TODO: this comment doesn't belong here...where does it go? :
- If the KDF is NOT yet setup, this method will do it. Supply the target
- compute time, and maximum memory requirements, and the underlying C++
- code will experimentally determine the "hardest" key-derivation params
- that will run within the specified time and memory usage on the system
- executing this method. You should set the max memory usage very low
- (a few kB) for devices like smartphones, which have limited memory
- availability. The KDF will then use less memory but more iterations
- to achieve the same compute time.
- """
- if self.useEncryption:
- if not securePassphrase:
- LOGERROR('')
- LOGERROR('You have requested changing the key-derivation')
- LOGERROR('parameters on an already-encrypted wallet, which')
- LOGERROR('requires modifying the encryption on this wallet.')
- LOGERROR('Please unlock your wallet before attempting to')
- LOGERROR('change the KDF parameters.')
- raise WalletLockError, 'Cannot change KDF without unlocking wallet'
- elif not self.verifyPassphrase(securePassphrase):
- LOGERROR('Incorrect passphrase to unlock wallet')
- raise PassphraseError, 'Incorrect passphrase to unlock wallet'
-
- secureSalt = SecureBinaryData(salt)
- newkdf = KdfRomix(mem, numIter, secureSalt)
- bp = BinaryPacker()
- bp.put(BINARY_CHUNK, self.serializeKdfParams(newkdf), width=256)
- updList = [[WLT_UPDATE_MODIFY, self.offsetKdfParams, bp.getBinaryString()]]
-
- if not self.useEncryption:
- # We may be setting the kdf params before enabling encryption
- self.walletFileSafeUpdate(updList)
- else:
- # Must change the encryption key: and we won't get here unless
- # we have a passphrase to use. This call will take the
- self.changeWalletEncryption(securePassphrase=securePassphrase, \
- extraFileUpdates=updList, kdfObj=newkdf)
-
- self.kdf = newkdf
-
-
-
-
- #############################################################################
- def changeWalletEncryption(self, secureKdfOutput=None, \
- securePassphrase=None, \
- extraFileUpdates=[],
- kdfObj=None):
- """
- Supply the passphrase you would like to use to encrypt this wallet
- (or supply the KDF output directly, to skip the passphrase part).
- This method will attempt to re-encrypt with the new passphrase.
- This fails if the wallet is already locked with a different passphrase.
- If encryption is already enabled, please unlock the wallet before
- calling this method.
-
- Make sure you set up the key-derivation function (KDF) before changing
- from an unencrypted to an encrypted wallet. An error will be thrown
- if you don't. You can use something like the following
-
- # For a target of 0.05-0.1s compute time:
- (mem,nIter,salt) = wlt.computeSystemSpecificKdfParams(0.1)
- wlt.changeKdfParams(mem, nIter, salt)
-
- Use the extraFileUpdates to pass in other changes that need to be
- written to the wallet file in the same atomic operation as the
- encryption key modifications.
- """
-
- if not kdfObj:
- kdfObj = self.kdf
-
- oldUsedEncryption = self.useEncryption
- if securePassphrase or secureKdfOutput:
- newUsesEncryption = True
- else:
- newUsesEncryption = False
-
- oldKdfKey = None
- if oldUsedEncryption:
- if self.isLocked:
- raise WalletLockError, 'Must unlock wallet to change passphrase'
- else:
- oldKdfKey = self.kdfKey.copy()
-
-
- if newUsesEncryption and not self.kdf:
- raise EncryptionError, 'KDF must be setup before encrypting wallet'
-
- # Prep the file-update list with extras passed in as argument
- walletUpdateInfo = list(extraFileUpdates)
-
- # Derive the new KDF key if a passphrase was supplied
- newKdfKey = secureKdfOutput
- if securePassphrase:
- newKdfKey = self.kdf.DeriveKey(securePassphrase)
-
- if oldUsedEncryption and newUsesEncryption and self.verifyEncryptionKey(newKdfKey):
- LOGWARN('Attempting to change encryption to same passphrase!')
- return # Wallet is encrypted with the new passphrase already
-
-
- # With unlocked key data, put the rest in a try/except/finally block
- # To make sure we destroy the temporary kdf outputs
- try:
- # If keys were previously unencrypted, they will be not have
- # initialization vectors and need to be generated before encrypting.
- # This is why we have the enableKeyEncryption() call
-
- if not oldUsedEncryption==newUsesEncryption:
- # If there was an encryption change, we must change the flags
- # in the wallet file in the same atomic operation as changing
- # the stored keys. We can't let them get out of sync.
- self.useEncryption = newUsesEncryption
- walletUpdateInfo.append(self.createChangeFlagsEntry())
- self.useEncryption = oldUsedEncryption
- # Restore the old flag just in case the file write fails
-
- newAddrMap = {}
- for addr160,addr in self.addrMap.iteritems():
- newAddrMap[addr160] = addr.copy()
- newAddrMap[addr160].enableKeyEncryption(generateIVIfNecessary=True)
- newAddrMap[addr160].changeEncryptionKey(oldKdfKey, newKdfKey)
- newAddrMap[addr160].walletByteLoc = addr.walletByteLoc
- walletUpdateInfo.append( \
- [WLT_UPDATE_MODIFY, addr.walletByteLoc, newAddrMap[addr160].serialize()])
-
-
- # Try to update the wallet file with the new encrypted key data
- updateSuccess = self.walletFileSafeUpdate( walletUpdateInfo )
-
- if updateSuccess:
- # Finally give the new data to the user
- for addr160,addr in newAddrMap.iteritems():
- self.addrMap[addr160] = addr.copy()
-
- self.useEncryption = newUsesEncryption
- if newKdfKey:
- self.unlock(newKdfKey)
- finally:
- # Make sure we always destroy the temporary passphrase results
- if newKdfKey: newKdfKey.destroy()
- if oldKdfKey: oldKdfKey.destroy()
-
-
-
- #############################################################################
- def getWalletPath(self, nameSuffix=None):
- fpath = self.walletPath
-
- if self.walletPath=='':
- fpath = os.path.join(ARMORY_HOME_DIR, 'armory_%s_.wallet' % self.uniqueIDB58)
-
- if not nameSuffix==None:
- pieces = os.path.splitext(fpath)
- if not pieces[0].endswith('_'):
- fpath = pieces[0] + '_' + nameSuffix + pieces[1]
- else:
- fpath = pieces[0] + nameSuffix + pieces[1]
- return fpath
-
-
-
- #############################################################################
- def getCommentForAddress(self, addr160):
- if self.commentsMap.has_key(addr160):
- return self.commentsMap[addr160]
- else:
- return ''
-
- #############################################################################
- def getComment(self, hashVal):
- """
- This method is used for both address comments, as well as tx comments
- In the first case, use the 20-byte binary pubkeyhash. Use 32-byte tx
- hash for the tx-comment case.
- """
- if self.commentsMap.has_key(hashVal):
- return self.commentsMap[hashVal]
- else:
- return ''
-
- #############################################################################
- def setComment(self, hashVal, newComment):
- """
- This method is used for both address comments, as well as tx comments
- In the first case, use the 20-byte binary pubkeyhash. Use 32-byte tx
- hash for the tx-comment case.
- """
- updEntry = []
- isNewComment = False
- if self.commentsMap.has_key(hashVal):
- # If there is already a comment for this address, overwrite it
- oldCommentLen = len(self.commentsMap[hashVal])
- oldCommentLoc = self.commentLocs[hashVal]
- # The first 23 bytes are the datatype, hashVal, and 2-byte comment size
- offset = 1 + len(hashVal) + 2
- updEntry.append([WLT_UPDATE_MODIFY, oldCommentLoc+offset, '\x00'*oldCommentLen])
- else:
- isNewComment = True
-
-
- dtype = WLT_DATATYPE_ADDRCOMMENT
- if len(hashVal)>20:
- dtype = WLT_DATATYPE_TXCOMMENT
-
- updEntry.append([WLT_UPDATE_ADD, dtype, hashVal, newComment])
- newCommentLoc = self.walletFileSafeUpdate(updEntry)
- self.commentsMap[hashVal] = newComment
-
- # If there was a wallet overwrite, it's location is the first element
- self.commentLocs[hashVal] = newCommentLoc[-1]
-
-
-
- #############################################################################
- def getAddrCommentIfAvail(self, txHash):
- if not TheBDM.getBDMState()=='BlockchainReady':
- return self.getComment(txHash)
-
- # If we haven't extracted relevant addresses for this tx, yet -- do it
- if not self.txAddrMap.has_key(txHash):
- self.txAddrMap[txHash] = []
- tx = TheBDM.getTxByHash(txHash)
- if tx.isInitialized():
- for i in range(tx.getNumTxOut()):
- try:
- a160 = CheckHash160(tx.getScrAddrForTxOut(i))
- if self.hasAddr(a160):
- self.txAddrMap[txHash].append(a160)
- except:
- LOGERROR("Unrecognized scraddr: " + binary_to_hex(tx.getScrAddrForTxOut(i)))
-
-
-
-
- addrComments = []
- for a160 in self.txAddrMap[txHash]:
- if self.commentsMap.has_key(a160):
- addrComments.append(self.commentsMap[a160])
-
- return '; '.join(addrComments)
-
-
- #############################################################################
- def getCommentForLE(self, le):
- # Smart comments for LedgerEntry objects: get any direct comments ...
- # if none, then grab the one for any associated addresses.
- txHash = le.getTxHash()
- if self.commentsMap.has_key(txHash):
- comment = self.commentsMap[txHash]
- else:
- # [[ COMMENTS ]] are not meant to be displayed on main ledger
- comment = self.getAddrCommentIfAvail(txHash)
- if comment.startswith('[[') and comment.endswith(']]'):
- comment = ''
-
- return comment
-
-
-
-
-
- #############################################################################
- def setWalletLabels(self, lshort, llong=''):
- self.labelName = lshort
- self.labelDescr = llong
- toWriteS = lshort.ljust( 32, '\x00')
- toWriteL = llong.ljust(256, '\x00')
-
- updList = []
- updList.append([WLT_UPDATE_MODIFY, self.offsetLabelName, toWriteS])
- updList.append([WLT_UPDATE_MODIFY, self.offsetLabelDescr, toWriteL])
- self.walletFileSafeUpdate(updList)
-
-
- #############################################################################
- def packWalletFlags(self, binPacker):
- nFlagBytes = 8
- flags = [False]*nFlagBytes*8
- flags[0] = self.useEncryption
- flags[1] = self.watchingOnly
- flagsBitset = ''.join([('1' if f else '0') for f in flags])
- binPacker.put(UINT64, bitset_to_int(flagsBitset))
-
- #############################################################################
- def createChangeFlagsEntry(self):
- """
- Packs up the wallet flags and returns a update-entry that can be included
- in a walletFileSafeUpdate call.
- """
- bp = BinaryPacker()
- self.packWalletFlags(bp)
- toWrite = bp.getBinaryString()
- return [WLT_UPDATE_MODIFY, self.offsetWltFlags, toWrite]
-
- #############################################################################
- def unpackWalletFlags(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- flagData = toUnpack
- else:
- flagData = BinaryUnpacker( toUnpack )
-
- wltflags = flagData.get(UINT64, 8)
- wltflags = int_to_bitset(wltflags, widthBytes=8)
- self.useEncryption = (wltflags[0]=='1')
- self.watchingOnly = (wltflags[1]=='1')
-
-
- #############################################################################
- def packHeader(self, binPacker):
- if not self.addrMap['ROOT']:
- raise WalletAddressError, 'Cannot serialize uninitialzed wallet!'
-
- startByte = binPacker.getSize()
-
- binPacker.put(BINARY_CHUNK, self.fileTypeStr, width=8)
- binPacker.put(UINT32, getVersionInt(self.version))
- binPacker.put(BINARY_CHUNK, self.magicBytes, width=4)
-
- # Wallet info flags
- self.offsetWltFlags = binPacker.getSize() - startByte
- self.packWalletFlags(binPacker)
-
- # Binary Unique ID (firstAddr25bytes[:5][::-1])
- binPacker.put(BINARY_CHUNK, self.uniqueIDBin, width=6)
-
- # Unix time of wallet creations
- binPacker.put(UINT64, self.wltCreateDate)
-
- # User-supplied wallet label (short)
- self.offsetLabelName = binPacker.getSize() - startByte
- binPacker.put(BINARY_CHUNK, self.labelName , width=32)
-
- # User-supplied wallet label (long)
- self.offsetLabelDescr = binPacker.getSize() - startByte
- binPacker.put(BINARY_CHUNK, self.labelDescr, width=256)
-
- # Highest used address:
- self.offsetTopUsed = binPacker.getSize() - startByte
- binPacker.put(INT64, self.highestUsedChainIndex)
-
- # Key-derivation function parameters
- self.offsetKdfParams = binPacker.getSize() - startByte
- binPacker.put(BINARY_CHUNK, self.serializeKdfParams(), width=256)
-
- # Wallet encryption parameters (currently nothing to put here)
- self.offsetCrypto = binPacker.getSize() - startByte
- binPacker.put(BINARY_CHUNK, self.serializeCryptoParams(), width=256)
-
- # Address-chain root, (base-address for deterministic wallets)
- self.offsetRootAddr = binPacker.getSize() - startByte
- self.addrMap['ROOT'].walletByteLoc = self.offsetRootAddr
- binPacker.put(BINARY_CHUNK, self.addrMap['ROOT'].serialize())
-
- # In wallet version 1.0, this next kB is unused -- may be used in future
- binPacker.put(BINARY_CHUNK, '\x00'*1024)
- return binPacker.getSize() - startByte
-
-
-
-
- #############################################################################
- def unpackHeader(self, binUnpacker):
- """
- Unpacking the header information from a wallet file. See the help text
- on the base class, PyBtcWallet, for more information on the wallet
- serialization.
- """
- self.fileTypeStr = binUnpacker.get(BINARY_CHUNK, 8)
- self.version = readVersionInt(binUnpacker.get(UINT32))
- self.magicBytes = binUnpacker.get(BINARY_CHUNK, 4)
-
- # Decode the bits to get the flags
- self.offsetWltFlags = binUnpacker.getPosition()
- self.unpackWalletFlags(binUnpacker)
-
- # This is the first 4 bytes of the 25-byte address-chain-root address
- # This includes the network byte (i.e. main network, testnet, namecoin)
- self.uniqueIDBin = binUnpacker.get(BINARY_CHUNK, 6)
- self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
- self.wltCreateDate = binUnpacker.get(UINT64)
-
- # We now have both the magic bytes and network byte
- if not self.magicBytes == MAGIC_BYTES:
- LOGERROR('Requested wallet is for a different blockchain!')
- LOGERROR('Wallet is for: %s ', BLOCKCHAINS[self.magicBytes])
- LOGERROR('ArmoryEngine: %s ', BLOCKCHAINS[MAGIC_BYTES])
- return
- if not self.uniqueIDBin[-1] == ADDRBYTE:
- LOGERROR('Requested wallet is for a different network!')
- LOGERROR('Wallet is for: %s ', NETWORKS[self.uniqueIDBin[-1]])
- LOGERROR('ArmoryEngine: %s ', NETWORKS[ADDRBYTE])
- return
-
- # User-supplied description/name for wallet
- self.offsetLabelName = binUnpacker.getPosition()
- self.labelName = binUnpacker.get(BINARY_CHUNK, 32).strip('\x00')
-
-
- # Longer user-supplied description/name for wallet
- self.offsetLabelDescr = binUnpacker.getPosition()
- self.labelDescr = binUnpacker.get(BINARY_CHUNK, 256).strip('\x00')
-
-
- self.offsetTopUsed = binUnpacker.getPosition()
- self.highestUsedChainIndex = binUnpacker.get(INT64)
-
-
- # Read the key-derivation function parameters
- self.offsetKdfParams = binUnpacker.getPosition()
- self.kdf = self.unserializeKdfParams(binUnpacker)
-
- # Read the crypto parameters
- self.offsetCrypto = binUnpacker.getPosition()
- self.crypto = self.unserializeCryptoParams(binUnpacker)
-
- # Read address-chain root address data
- self.offsetRootAddr = binUnpacker.getPosition()
-
-
- rawAddrData = binUnpacker.get(BINARY_CHUNK, self.pybtcaddrSize)
- self.addrMap['ROOT'] = PyBtcAddress().unserialize(rawAddrData)
- fixedAddrData = self.addrMap['ROOT'].serialize()
- if not rawAddrData==fixedAddrData:
- self.walletFileSafeUpdate([ \
- [WLT_UPDATE_MODIFY, self.offsetRootAddr, fixedAddrData]])
-
- self.addrMap['ROOT'].walletByteLoc = self.offsetRootAddr
- if self.useEncryption:
- self.addrMap['ROOT'].isLocked = True
- self.isLocked = True
-
- # In wallet version 1.0, this next kB is unused -- may be used in future
- binUnpacker.advance(1024)
-
- # TODO: automatic conversion if the code uses a newer wallet
- # version than the wallet... got a manual script, but it
- # would be nice to autodetect and correct
- #convertVersion
-
-
- #############################################################################
- def unpackNextEntry(self, binUnpacker):
- dtype = binUnpacker.get(UINT8)
- hashVal = ''
- binData = ''
- if dtype==WLT_DATATYPE_KEYDATA:
- hashVal = binUnpacker.get(BINARY_CHUNK, 20)
- binData = binUnpacker.get(BINARY_CHUNK, self.pybtcaddrSize)
- elif dtype==WLT_DATATYPE_ADDRCOMMENT:
- hashVal = binUnpacker.get(BINARY_CHUNK, 20)
- commentLen = binUnpacker.get(UINT16)
- binData = binUnpacker.get(BINARY_CHUNK, commentLen)
- elif dtype==WLT_DATATYPE_TXCOMMENT:
- hashVal = binUnpacker.get(BINARY_CHUNK, 32)
- commentLen = binUnpacker.get(UINT16)
- binData = binUnpacker.get(BINARY_CHUNK, commentLen)
- elif dtype==WLT_DATATYPE_OPEVAL:
- raise NotImplementedError, 'OP_EVAL not support in wallet yet'
- elif dtype==WLT_DATATYPE_DELETED:
- deletedLen = binUnpacker.get(UINT16)
- binUnpacker.advance(deletedLen)
-
-
- return (dtype, hashVal, binData)
-
- #############################################################################
- def readWalletFile(self, wltpath, verifyIntegrity=True, doScanNow=False):
-
- TimerStart('readWalletFile')
-
- if not os.path.exists(wltpath):
- raise FileExistsError, "No wallet file:"+wltpath
-
- self.__init__()
- self.walletPath = wltpath
-
- if verifyIntegrity:
- try:
- nError = self.doWalletFileConsistencyCheck()
- except KeyDataError, errmsg:
- LOGEXCEPT('***ERROR: Wallet file had unfixable errors.')
- raise KeyDataError, errmsg
-
-
- wltfile = open(wltpath, 'rb')
- wltdata = BinaryUnpacker(wltfile.read())
- wltfile.close()
-
- self.cppWallet = Cpp.BtcWallet()
- self.unpackHeader(wltdata)
-
- self.lastComputedChainIndex = -UINT32_MAX
- self.lastComputedChainAddr160 = None
- while wltdata.getRemainingSize()>0:
- byteLocation = wltdata.getPosition()
- dtype, hashVal, rawData = self.unpackNextEntry(wltdata)
- if dtype==WLT_DATATYPE_KEYDATA:
- newAddr = PyBtcAddress()
- newAddr.unserialize(rawData)
- newAddr.walletByteLoc = byteLocation + 21
- # Fix byte errors in the address data
- fixedAddrData = newAddr.serialize()
- if not rawData==fixedAddrData:
- self.walletFileSafeUpdate([ \
- [WLT_UPDATE_MODIFY, newAddr.walletByteLoc, fixedAddrData]])
- if newAddr.useEncryption:
- newAddr.isLocked = True
- self.addrMap[hashVal] = newAddr
- if newAddr.chainIndex > self.lastComputedChainIndex:
- self.lastComputedChainIndex = newAddr.chainIndex
- self.lastComputedChainAddr160 = newAddr.getAddr160()
- self.linearAddr160List.append(newAddr.getAddr160())
- self.chainIndexMap[newAddr.chainIndex] = newAddr.getAddr160()
-
- # Update the parallel C++ object that scans the blockchain for us
- timeRng = newAddr.getTimeRange()
- blkRng = newAddr.getBlockRange()
- self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(hashVal), \
- timeRng[0], blkRng[0], \
- timeRng[1], blkRng[1])
- if dtype in (WLT_DATATYPE_ADDRCOMMENT, WLT_DATATYPE_TXCOMMENT):
- self.commentsMap[hashVal] = rawData # actually ASCII data, here
- self.commentLocs[hashVal] = byteLocation
- if dtype==WLT_DATATYPE_OPEVAL:
- raise NotImplementedError, 'OP_EVAL not support in wallet yet'
- if dtype==WLT_DATATYPE_DELETED:
- pass
-
-
- if (not doScanNow or \
- not TheBDM.getBDMState()=='BlockchainReady' or \
- self.doBlockchainSync==BLOCKCHAIN_DONOTUSE):
- pass
- else:
- self.syncWithBlockchain()
-
-
- ### Update the wallet version if necessary ###
- if getVersionInt(self.version) < getVersionInt(PYBTCWALLET_VERSION):
- LOGERROR('Wallets older than version 1.35 no loger supported!')
- return
-
- TimerStop('readWalletFile')
-
- return self
-
-
-
- #############################################################################
- def walletFileSafeUpdate(self, updateList):
-
- """
- The input "toAddDataList" should be a list of triplets, such as:
- [
- [WLT_DATA_ADD, WLT_DATATYPE_KEYDATA, addr160_1, PyBtcAddrObj1]
- [WLT_DATA_ADD, WLT_DATATYPE_KEYDATA, addr160_2, PyBtcAddrObj2]
- [WLT_DATA_MODIFY, modifyStartByte1, binDataForOverwrite1 ]
- [WLT_DATA_ADD, WLT_DATATYPE_ADDRCOMMENT, addr160_3, 'Long-term savings']
- [WLT_DATA_MODIFY, modifyStartByte2, binDataForOverwrite2 ]
- ]
-
- The return value is the list of new file byte offsets (from beginning of
- the file), that specify the start of each modification made to the
- wallet file. For MODIFY fields, this just returns the modifyStartByte
- field that was provided as input. For adding data, it specifies the
- starting byte of the new field (the DATATYPE byte). We keep this data
- in PyBtcAddress objects so that we know where to apply modifications in
- case we need to change something, like converting from unencrypted to
- encrypted private keys.
-
- If this method fails, we simply return an empty list. We can check for
- an empty list to know if the file update succeeded.
-
- WHY IS THIS SO COMPLICATED? -- Because it's atomic!
-
- When we want to add data to the wallet file, we will do so in a completely
- recoverable way. We define this method to make sure a backup exists when
- we start modifying the file, and keep a flag to identify when the wallet
- might be corrupt. If we ever try to load the wallet file and see another
- file with the _update_unsuccessful suffix, we should instead just restore
- from backup.
-
- Similarly, we have to update the backup file after updating the main file
- so we will use a similar technique with the backup_unsuccessful suffix.
- We don't want to rely on a backup if somehow *the backup* got corrupted
- and the original file is fine. THEREFORE -- this is implemented in such
- a way that the user should know two things:
-
- (1) No matter when the power goes out, we ALWAYS have a uncorrupted
- wallet file, and know which one it is. Either the backup is safe,
- or the original is safe. Based on the flag files, we know which
- one is guaranteed to be not corrupted.
- (2) ALWAYS DO YOUR FILE OPERATIONS BEFORE SETTING DATA IN MEMORY
- You must write it to disk FIRST using this SafeUpdate method,
- THEN give the new data to the user -- never give it to them
- until you are sure that it was written safely to disk.
-
- Number (2) is easy to screw up because you plan to write the file just
- AFTER the data is created and stored in local memory. But an error
- might be thrown halfway which is handled higher up, and instead the data
- never made it to file. Then there is a risk that the user uses their
- new address that never made it into the wallet file.
- """
-
- if not os.path.exists(self.walletPath):
- raise FileExistsError, 'No wallet file exists to be updated!'
-
- if len(updateList)==0:
- return []
-
- # Make sure that the primary and backup files are synced before update
- self.doWalletFileConsistencyCheck()
-
- walletFileBackup = self.getWalletPath('backup')
- mainUpdateFlag = self.getWalletPath('update_unsuccessful')
- backupUpdateFlag = self.getWalletPath('backup_unsuccessful')
-
-
- # Will be passing back info about all data successfully added
- oldWalletSize = os.path.getsize(self.walletPath)
- updateLocations = []
- dataToChange = []
- toAppend = BinaryPacker()
-
- try:
- for entry in updateList:
- modType = entry[0]
- updateInfo = entry[1:]
-
- if(modType==WLT_UPDATE_ADD):
- dtype = updateInfo[0]
- updateLocations.append(toAppend.getSize()+oldWalletSize)
- if dtype==WLT_DATATYPE_KEYDATA:
- if len(updateInfo[1])!=20 or not isinstance(updateInfo[2], PyBtcAddress):
- raise Exception, 'Data type does not match update type'
- toAppend.put(UINT8, WLT_DATATYPE_KEYDATA)
- toAppend.put(BINARY_CHUNK, updateInfo[1])
- toAppend.put(BINARY_CHUNK, updateInfo[2].serialize())
-
- elif dtype in (WLT_DATATYPE_ADDRCOMMENT, WLT_DATATYPE_TXCOMMENT):
- if not isinstance(updateInfo[2], str):
- raise Exception, 'Data type does not match update type'
- toAppend.put(UINT8, dtype)
- toAppend.put(BINARY_CHUNK, updateInfo[1])
- toAppend.put(UINT16, len(updateInfo[2]))
- toAppend.put(BINARY_CHUNK, updateInfo[2])
-
- elif dtype==WLT_DATATYPE_OPEVAL:
- raise Exception, 'OP_EVAL not support in wallet yet'
-
- elif(modType==WLT_UPDATE_MODIFY):
- updateLocations.append(updateInfo[0])
- dataToChange.append( updateInfo )
- else:
- LOGERROR('Unknown wallet-update type!')
- raise Exception, 'Unknown wallet-update type!'
- except Exception:
- LOGEXCEPT('Bad input to walletFileSafeUpdate')
- return []
-
- binaryToAppend = toAppend.getBinaryString()
-
- # We need to safely modify both the main wallet file and backup
- # Start with main wallet
- touchFile(mainUpdateFlag)
-
- try:
- wltfile = open(self.walletPath, 'ab')
- wltfile.write(binaryToAppend)
- wltfile.close()
-
- # This is for unit-testing the atomic-wallet-file-update robustness
- if self.interruptTest1: raise InterruptTestError
-
- wltfile = open(self.walletPath, 'r+b')
- for loc,replStr in dataToChange:
- wltfile.seek(loc)
- wltfile.write(replStr)
- wltfile.close()
-
- except IOError:
- LOGEXCEPT('Could not write data to wallet. Permissions?')
- shutil.copy(walletFileBackup, self.walletPath)
- os.remove(mainUpdateFlag)
- return []
-
- # Write backup flag before removing main-update flag. If we see
- # both flags, we know file IO was interrupted RIGHT HERE
- touchFile(backupUpdateFlag)
-
- # This is for unit-testing the atomic-wallet-file-update robustness
- if self.interruptTest2: raise InterruptTestError
-
- os.remove(mainUpdateFlag)
-
- # Modify backup
- try:
- # This is for unit-testing the atomic-wallet-file-update robustness
- if self.interruptTest3: raise InterruptTestError
-
- backupfile = open(walletFileBackup, 'ab')
- backupfile.write(binaryToAppend)
- backupfile.close()
-
- backupfile = open(walletFileBackup, 'r+b')
- for loc,replStr in dataToChange:
- backupfile.seek(loc)
- backupfile.write(replStr)
- backupfile.close()
-
- except IOError:
- LOGEXCEPT('Could not write backup wallet. Permissions?')
- shutil.copy(self.walletPath, walletFileBackup)
- os.remove(mainUpdateFlag)
- return []
-
- os.remove(backupUpdateFlag)
-
- return updateLocations
-
-
-
- #############################################################################
- def doWalletFileConsistencyCheck(self, onlySyncBackup=True):
- """
- First we check the file-update flags (files we touched/removed during
- file modification operations), and then restore the primary wallet file
- and backup file to the exact same state -- we know that at least one of
- them is guaranteed to not be corrupt, and we know based on the flags
- which one that is -- so we execute the appropriate copy operation.
-
- ***NOTE: For now, the remaining steps are untested and unused!
-
- After we have guaranteed that main wallet and backup wallet are the
- same, we want to do a check that the data is consistent. We do this
- by simply reading in the key-data from the wallet, unserializing it
- and reserializing it to see if it matches -- this works due to the
- way the PyBtcAddress::unserialize() method works: it verifies the
- checksums in the address data, and corrects errors automatically!
- And it's part of the unit-tests that serialize/unserialize round-trip
- is guaranteed to match for all address types if there's no byte errors.
-
- If an error is detected, we do a safe-file-modify operation to re-write
- the corrected information to the wallet file, in-place. We DO NOT
- check comment fields, since they do not have checksums, and are not
- critical to protect against byte errors.
- """
-
-
-
- if not os.path.exists(self.walletPath):
- raise FileExistsError, 'No wallet file exists to be checked!'
-
- walletFileBackup = self.getWalletPath('backup')
- mainUpdateFlag = self.getWalletPath('update_unsuccessful')
- backupUpdateFlag = self.getWalletPath('backup_unsuccessful')
-
- if not os.path.exists(walletFileBackup):
- # We haven't even created a backup file, yet
- LOGDEBUG('Creating backup file %s', walletFileBackup)
- touchFile(backupUpdateFlag)
- shutil.copy(self.walletPath, walletFileBackup)
- os.remove(backupUpdateFlag)
-
- if os.path.exists(backupUpdateFlag) and os.path.exists(mainUpdateFlag):
- # Here we actually have a good main file, but backup never succeeded
- LOGWARN('***WARNING: error in backup file... how did that happen?')
- shutil.copy(self.walletPath, walletFileBackup)
- os.remove(mainUpdateFlag)
- os.remove(backupUpdateFlag)
- elif os.path.exists(mainUpdateFlag):
- LOGWARN('***WARNING: last file operation failed! Restoring wallet from backup')
- # main wallet file might be corrupt, copy from backup
- shutil.copy(walletFileBackup, self.walletPath)
- os.remove(mainUpdateFlag)
- elif os.path.exists(backupUpdateFlag):
- LOGWARN('***WARNING: creation of backup was interrupted -- fixing')
- shutil.copy(self.walletPath, walletFileBackup)
- os.remove(backupUpdateFlag)
-
- if onlySyncBackup:
- return 0
-
-
-
-
-
-
- #############################################################################
- #def getAddrByIndex(self, i):
- #return self.addrMap.values()[i]
-
- #############################################################################
- def deleteImportedAddress(self, addr160):
- """
- We want to overwrite a particular key in the wallet. Before overwriting
- the data looks like this:
- [ \x00 | <20-byte addr160> | <237-byte keydata> ]
- And we want it to look like:
- [ \x04 | <2-byte length> | \x00\x00\x00... ]
- So we need to construct a wallet-update vector to modify the data
- starting at the first byte, replace it with 0x04, specifies how many
- bytes are in the deleted entry, and then actually overwrite those
- bytes with 0s
- """
-
- if not self.addrMap[addr160].chainIndex==-2:
- raise WalletAddressError, 'You can only delete imported addresses!'
-
- overwriteLoc = self.addrMap[addr160].walletByteLoc - 21
- overwriteLen = 20 + self.pybtcaddrSize - 2
-
- overwriteBin = ''
- overwriteBin += int_to_binary(WLT_DATATYPE_DELETED, widthBytes=1)
- overwriteBin += int_to_binary(overwriteLen, widthBytes=2)
- overwriteBin += '\x00'*overwriteLen
-
- self.walletFileSafeUpdate([[WLT_UPDATE_MODIFY, overwriteLoc, overwriteBin]])
-
- # IMPORTANT: we need to update the wallet structures to reflect the
- # new state of the wallet. This will actually be easiest
- # if we just "forget" the current wallet state and re-read
- # the wallet from file
- wltPath = self.walletPath
- self.readWalletFile(wltPath, doScanNow=True)
-
-
- #############################################################################
- def importExternalAddressData(self, privKey=None, privChk=None, \
- pubKey=None, pubChk=None, \
- addr20=None, addrChk=None, \
- firstTime=UINT32_MAX, firstBlk=UINT32_MAX, \
- lastTime=0, lastBlk=0):
- """
- This wallet fully supports importing external keys, even though it is
- a deterministic wallet: determinism only adds keys to the pool based
- on the address-chain, but there's nothing wrong with adding new keys
- not on the chain.
-
- We don't know when this address was created, so we have to set its
- first/last-seen times to 0, to make sure we search the whole blockchain
- for tx related to it. This data will be updated later after we've done
- the search and know for sure when it is "relevant".
- (alternatively, if you know it's first-seen time for some reason, you
- can supply it as an input, but this seems rare: we don't want to get it
- wrong or we could end up missing wallet-relevant transactions)
-
- DO NOT CALL FROM A BDM THREAD FUNCTION. IT MAY DEADLOCK.
- """
-
- if self.calledFromBDM:
- LOGERROR('Called importExternalAddressData() from BDM method!')
- LOGERROR('Don\'t do this!')
- return ''
-
- if not privKey and not self.watchingOnly:
- LOGERROR('')
- LOGERROR('This wallet is strictly for addresses that you')
- LOGERROR('own. You cannot import addresses without the')
- LOGERROR('the associated private key. Instead, use a')
- LOGERROR('watching-only wallet to import this address.')
- LOGERROR('(actually, this is currently, completely disabled)')
- raise WalletAddressError, 'Cannot import non-private-key addresses'
-
-
-
- # First do all the necessary type conversions and error corrections
- computedPubKey = None
- computedAddr20 = None
- if privKey:
- if isinstance(privKey, str):
- privKey = SecureBinaryData(privKey)
-
- if privChk:
- privKey = SecureBinaryData(verifyChecksum(privKey.toBinStr(), privChk))
-
- computedPubkey = CryptoECDSA().ComputePublicKey(privKey)
- computedAddr20 = convertKeyDataToAddress(pubKey=computedPubkey)
-
- # If public key is provided, we prep it so we can verify Pub/Priv match
- if pubKey:
- if isinstance(pubKey, str):
- pubKey = SecureBinaryData(pubKey)
- if pubChk:
- pubKey = SecureBinaryData(verifyChecksum(pubKey.toBinStr(), pubChk))
-
- if not computedAddr20:
- computedAddr20 = convertKeyDataToAddress(pubKey=pubKey)
-
- # The 20-byte address (pubkey hash160) should always be a python string
- if addr20:
- if not isinstance(pubKey, str):
- addr20 = addr20.toBinStr()
- if addrChk:
- addr20 = verifyChecksum(addr20, addrChk)
-
-
- # Now a few sanity checks
- if self.addrMap.has_key(addr20):
- LOGWARN('This address is already in your wallet!')
- return
-
- #if pubKey and not computedPubkey==pubKey:
- #raise ECDSA_Error, 'Private and public keys to be imported do not match!'
- #if addr20 and not computedAddr20==addr20:
- #raise ECDSA_Error, 'Supplied address hash does not match key data!'
-
- addr20 = computedAddr20
-
- if self.addrMap.has_key(addr20):
- return None
-
- # If a private key is supplied and this wallet is encrypted&locked, then
- # we have no way to secure the private key without unlocking the wallet.
- if self.useEncryption and privKey and not self.kdfKey:
- raise WalletLockError, 'Cannot import private key when wallet is locked!'
-
-
- if privKey:
- # For priv key, lots of extra encryption and verification options
- newAddr = PyBtcAddress().createFromPlainKeyData( addr160=addr20, \
- plainPrivKey=privKey, publicKey65=computedPubkey, \
- willBeEncr=self.useEncryption, \
- generateIVIfNecessary=self.useEncryption, \
- skipCheck=True, skipPubCompute=True)
- if self.useEncryption:
- newAddr.lock(self.kdfKey)
- newAddr.unlock(self.kdfKey)
- elif pubKey:
- securePubKey = SecureBinaryData(pubKey)
- newAddr = PyBtcAddress().createFromPublicKeyData(securePubKey)
- else:
- newAddr = PyBtcAddress().createFromPublicKeyHash160(addr20)
-
-
- newAddr.chaincode = SecureBinaryData('\xff'*32)
- newAddr.chainIndex = -2
- newAddr.timeRange = [firstTime, lastTime]
- newAddr.blkRange = [firstBlk, lastBlk ]
- #newAddr.binInitVect16 = SecureBinaryData().GenerateRandom(16)
- newAddr160 = newAddr.getAddr160()
-
- newDataLoc = self.walletFileSafeUpdate( \
- [[WLT_UPDATE_ADD, WLT_DATATYPE_KEYDATA, newAddr160, newAddr]])
- self.addrMap[newAddr160] = newAddr.copy()
- self.addrMap[newAddr160].walletByteLoc = newDataLoc[0] + 21
- self.linearAddr160List.append(newAddr160)
- if self.useEncryption and self.kdfKey:
- self.addrMap[newAddr160].lock(self.kdfKey)
- if not self.isLocked:
- self.addrMap[newAddr160].unlock(self.kdfKey)
-
- self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(newAddr160), \
- firstTime, firstBlk, lastTime, lastBlk)
-
- # The following line MAY deadlock if this method is called from the BDM
- # thread. Do not write any BDM methods that calls this method!
- TheBDM.registerImportedScrAddr(Hash160ToScrAddr(newAddr160),
- firstTime, firstBlk, lastTime, lastBlk)
-
-
- return newAddr160
-
-
- #############################################################################
- def bulkImportAddresses(self, textBlock, privKeyEndian=BIGENDIAN, \
- sepList=":;'[]()=-_*&^%$#@!,./?\n"):
- """
- Attempts to import plaintext key data stored in a file. This method
- expects all data to be in hex or Base58:
-
- 20 bytes / 40 hex chars -- public key hashes
- 25 bytes / 50 hex chars -- full binary addresses
- 65 bytes / 130 hex chars -- public key
- 32 bytes / 64 hex chars -- private key
-
- 33 or 34 Base58 chars -- address strings
- 50 to 52 Base58 chars -- base58-encoded private key
-
- Since this is python, I don't have to require any particular format:
- I can pretty easily break apart the entire file into individual strings,
- search for addresses and public keys, then, search for private keys that
- correspond to that data. Obviously, simpler is better, but as long as
- the data is encoded as in the above list and separated by whitespace or
- punctuation, this method should succeed.
-
- We must throw an error if this is NOT a watching-only address and we
- find an address without a private key. We will need to create a
- separate watching-only wallet in order to import these keys.
-
- TODO: will finish this later
- """
-
- """
- STUB: (AGAIN) I just can't make this work out to be as stupid-proof
- as I originally planned. I'll have to put it on hold.
- self.__init__()
-
- newfile = open(filename,'rb')
- newdata = newfile.read()
- newfile.close()
-
- # Change all punctuation to the same char so split() works easier
- for ch in sepList:
- newdata.replace(ch, ' ')
-
- newdata = newdata.split()
- hexChars = '01234567890abcdef'
- b58Chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
- DATATYPES = enum( 'UNKNOWN', \
- 'Addr_Hex_20', \
- 'Addr_B58_25', \
- 'PubX_Hex_32', \
- 'PubY_Hex_32', \
- 'PubK_Hex_65', \
- 'Priv_Hex_32', \
- 'Priv_Hex_36', \
- 'Priv_Hex_37', \
- 'Priv_B58_32', \
- 'Priv_B58_37', \
- 'Priv_MiniPriv', \
- 'PubK_Hex_33_Compressed', \
- 'Priv_Hex_33_Compressed')
-
- DTYPES = enum('Unknown', 'Hash160', 'PubKey', 'PrivKey', 'Byte32', 'Byte33')
-
-
- lastAddr = None
- lastPubK = None
- lastPriv = None
- for theStr in newdata:
- if len(theStr)<20:
- continue
-
- hexCount = sum([1 if c in hexChars else 0 for c in theStr])
- b58Count = sum([1 if c in b58Chars else 0 for c in theStr])
- canBeHex = hexCount==len(theStr)
- canBeB58 = b58Count==len(theStr)
- isHex = canBeHex
- isB58 = canBeB58 and not canBeHex
- isStr = not isHex and not isB58
-
- dataAndType = [DTYPES.Unknown, '']
- if isHex:
- binData = hex_to_binary(theStr)
- sz = len(binData)
-
- if sz==20:
- dataAndType = [DTYPES.Hash160, binData]
- elif sz==25:
- dataAndType = [DTYPES.Hash160, binData[1:21]]
- elif sz==32:
- dataAndType = [DTYPES., binData[1:21]]
- elif isB58:
- binData = base58_to_binary(theStr)
- sz = len(binData)
-
-
- if isHex and sz==40:
- elif isHex and sz==50:
- dataAndType = [DTYPES.Hash160, hex_to_binary(theStr)[1:21]]
- elif isB58 and sz>=31 and sz<=35:
- dataAndType = [DTYPES.Hash160, addrStr_to_hash160(theStr)]
- elif isHex is sz==130:
- dataAndType = [DTYPES.PubKey, hex_to_binary(theStr)]
- elif isHex is sz==128:
- dataAndType = [DTYPES.PubKey, '\x04'+hex_to_binary(theStr)]
- elif isHex is sz==128:
-
-
-
- potentialKey = SecureBinaryData('\x04' + piece)
- isValid = CryptoECDSA().VerifyPublicKeyValid(potentialKey)
- """
- pass
-
-
-
-
-
- #############################################################################
- def checkIfRescanRequired(self):
- """
- Returns true is we have to go back to disk/mmap and rescan more than two
- weeks worth of blocks
-
- DO NOT CALL FROM A BDM METHOD. Instead, call directly:
- self.bdm.numBlocksToRescan(pywlt.cppWallet) > 2016
- """
- if self.calledFromBDM:
- LOGERROR('Called checkIfRescanRequired() from BDM method!')
- LOGERROR('Don\'t do this!')
-
- if TheBDM.getBDMState()=='BlockchainReady':
- return (TheBDM.numBlocksToRescan(self.cppWallet) > 2016)
- else:
- return False
-
-
-
- #############################################################################
- def signTxDistProposal(self, txdp, hashcode=1):
- if not hashcode==1:
- LOGERROR('hashcode!=1 is not supported at this time!')
- return
-
- # If the wallet is locked, we better bail now
- if self.isLocked:
- raise WalletLockError, "Cannot sign Tx when wallet is locked!"
-
- numInputs = len(txdp.pytxObj.inputs)
- wltAddr = []
- for index,txin in enumerate(txdp.pytxObj.inputs):
- scriptType = getTxOutScriptType(txdp.txOutScripts[index])
-
- if scriptType in (TXOUT_SCRIPT_STANDARD, TXOUT_SCRIPT_COINBASE):
- addr160 = TxOutScriptExtractAddr160(txdp.txOutScripts[index])
- if self.hasAddr(addr160) and self.addrMap[addr160].hasPrivKey():
- wltAddr.append( (self.addrMap[addr160], index, 0))
- elif scriptType==TXOUT_SCRIPT_MULTISIG:
- # Basically the same check but multiple addresses to consider
- addrList = getTxOutMultiSigInfo(txdp.txOutScripts[index])[1]
- for addrIdx, addr in enumerate(addrList):
- if self.hasAddr(addr) and self.addrMap[addr].hasPrivKey():
- wltAddr.append( (self.addrMap[addr], index, addrIdx) )
- break
-
-
- # WltAddr now contains a list of every input we can sign for, and the
- # PyBtcAddress object that can be used to sign it. Let's do it.
- numMyAddr = len(wltAddr)
- LOGDEBUG('Total number of inputs in transaction: %d', numInputs)
- LOGDEBUG('Number of inputs that you can sign for: %d', numMyAddr)
-
-
- # Unlock the wallet if necessary, sign inputs
- maxChainIndex = -1
- for addrObj,idx, sigIdx in wltAddr:
- maxChainIndex = max(maxChainIndex, addrObj.chainIndex)
- if addrObj.isLocked:
- if self.kdfKey:
- addrObj.unlock(self.kdfKey)
- else:
- raise WalletLockError, 'Cannot sign tx without unlocking wallet'
-
- if not addrObj.hasPubKey():
- # Make sure the public key is available for this address
- addrObj.binPublicKey65 = CryptoECDSA().ComputePublicKey(addrObj.binPrivKey32_Plain)
-
- # Copy the script, blank out out all other scripts (assume hashcode==1)
- txCopy = PyTx().unserialize(txdp.pytxObj.serialize())
- for i in range(len(txCopy.inputs)):
- if not i==idx:
- txCopy.inputs[i].binScript = ''
- else:
- txCopy.inputs[i].binScript = txdp.txOutScripts[i]
-
- hashCode1 = int_to_binary(hashcode, widthBytes=1)
- hashCode4 = int_to_binary(hashcode, widthBytes=4)
- preHashMsg = txCopy.serialize() + hashCode4
- signature = addrObj.generateDERSignature(preHashMsg) + hashCode1
-
- # Now we attach a binary signature or full script, depending on the type
- if txdp.scriptTypes[idx]==TXOUT_SCRIPT_COINBASE:
- # Only need the signature to complete coinbase TxOut
- sigLenInBinary = int_to_binary(len(signature))
- txdp.signatures[idx][0] = sigLenInBinary + signature
- elif txdp.scriptTypes[idx]==TXOUT_SCRIPT_STANDARD:
- # Gotta include the public key, too, for standard TxOuts
- pubkey = addrObj.binPublicKey65.toBinStr()
- sigLenInBinary = int_to_binary(len(signature))
- pubkeyLenInBinary = int_to_binary(len(pubkey) )
- txdp.signatures[idx][0] = sigLenInBinary + signature + \
- pubkeyLenInBinary + pubkey
- elif txdp.scriptTypes[idx]==TXOUT_SCRIPT_MULTISIG:
- # We attach just the sig for multi-sig transactions
- sigLenInBinary = int_to_binary(len(signature))
- txdp.signatures[idx][sigIdx] = (sigLenInBinary + signature)
- else:
- LOGERROR('Unknown txOut script type')
-
-
- prevHighestIndex = self.highestUsedChainIndex
- if prevHighestIndex=0):
- # Either we want imported addresses, or this isn't one
- if (withAddrPool or addr.chainIndex<=self.highestUsedChainIndex):
- addrList.append(addr)
-
- return addrList
-
-
- #############################################################################
- def getAddress160ByChainIndex(self, desiredIdx):
- """
- It should be safe to assume that if the index is less than the highest
- computed, it will be in the chainIndexMap, but I don't like making such
- assumptions. Perhaps something went wrong with the wallet, or it was
- manually reconstructed and has holes in the chain. We will regenerate
- addresses up to that point, if necessary (but nothing past the value
- self.lastComputedChainIndex.
- """
- if desiredIdx>self.lastComputedChainIndex or desiredIdx<0:
- # I removed the option for fillPoolIfNecessary, because of the risk
- # that a bug may lead to generation of billions of addresses, which
- # would saturate the system's resources and fill the HDD.
- raise WalletAddressError, 'Chain index is out of range'
-
-
- if self.chainIndexMap.has_key(desiredIdx):
- return self.chainIndexMap[desiredIdx]
- else:
- # Somehow the address isn't here, even though it is less than the
- # last computed index
- closestIdx = 0
- for idx,addr160 in self.chainIndexMap.iteritems():
- if closestIdx0 else 'Sent'
-
- blkStr = str(le.getBlockNum())
- print indent + 'LE %s %s %s %s' % \
- (addrStr.ljust(15), leVal, txType.ljust(8), blkStr.ljust(8))
-
-"""
-class PyLedgerEntry(object):
- def __init__(self):
- self.addr20 = UNINITIALIZED
- self.value = UNINITIALIZED
- self.blockNum = UNINITIALIZED
- self.txHash = UNINITIALIZED
- self.index = UNINITIALIZED
- self.isValid = UNINITIALIZED
- self.isSentToSelf = UNINITIALIZED
- self.isChangeBack = UNINITIALIZED
-
- def createForWalletFromTx(self, wlt, tx):
- numIn = len(tx.inputs)
- numOut = len(tx.outputs)
-
-
-
- // addr20_ - useless - originally had a purpose, but lost it
- // value_ - total debit/credit on WALLET balance, in Satoshis (1e-8 BTC)
- // blockNum_ - block height of the block in which this tx was included
- // txHash_ - hash of this tx
- // index_ - index of the tx in the block
- // isValid_ - default to true -- invalidated due to reorg/double-spend
- // isSentToSelf_ - if we supplied inputs and rx ALL outputs
- // isChangeBack_ - if we supplied inputs and rx ANY outputs
-"""
-
-
-
-
-###############################################################################
-###############################################################################
-#
-# Networking Objects
-#
-###############################################################################
-###############################################################################
-
-def quad_to_str( addrQuad):
- return '.'.join([str(a) for a in addrQuad])
-
-def quad_to_binary( addrQuad):
- return ''.join([chr(a) for a in addrQuad])
-
-def binary_to_quad(addrBin):
- return [ord(a) for a in addrBin]
-
-def str_to_quad(addrBin):
- return [int(a) for a in addrBin.split('.')]
-
-def str_to_binary(addrBin):
- """ I should come up with a better name for this -- it's net-addr only """
- return ''.join([chr(int(a)) for a in addrBin.split('.')])
-
-def parseNetAddress(addrObj):
- if isinstance(addrObj, str):
- if len(addrObj)==4:
- return binary_to_quad(addrObj)
- else:
- return str_to_quad(addrObj)
- # Probably already in the right form
- return addrObj
-
-
-
-MSG_INV_ERROR = 0
-MSG_INV_TX = 1
-MSG_INV_BLOCK = 2
-
-
-################################################################################
-class PyMessage(object):
- """
- All payload objects have a serialize and unserialize method, making them
- easy to attach to PyMessage objects
- """
- def __init__(self, cmd='', payload=None):
- """
- Can create a message by the command name, or the payload (or neither)
- """
- self.magic = MAGIC_BYTES
- self.cmd = cmd
- self.payload = payload
-
- if payload:
- self.cmd = payload.command
- elif cmd:
- self.payload = PayloadMap[self.cmd]()
-
-
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(BINARY_CHUNK, self.magic, width= 4)
- bp.put(BINARY_CHUNK, self.cmd.ljust(12, '\x00'), width=12)
- payloadBin = self.payload.serialize()
- bp.put(UINT32, len(payloadBin))
- bp.put(BINARY_CHUNK, hash256(payloadBin)[:4], width= 4)
- bp.put(BINARY_CHUNK, payloadBin)
- return bp.getBinaryString()
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- msgData = toUnpack
- else:
- msgData = BinaryUnpacker( toUnpack )
-
-
- self.magic = msgData.get(BINARY_CHUNK, 4)
- self.cmd = msgData.get(BINARY_CHUNK, 12).strip('\x00')
- length = msgData.get(UINT32)
- chksum = msgData.get(BINARY_CHUNK, 4)
- payload = msgData.get(BINARY_CHUNK, length)
- payload = verifyChecksum(payload, chksum)
-
- self.payload = PayloadMap[self.cmd]().unserialize(payload)
-
- if self.magic != MAGIC_BYTES:
- raise NetworkIDError, 'Message has wrong network bytes!'
- return self
-
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Bitcoin-Network-Message -- ' + self.cmd.upper()
- print indstr + indent + 'Magic: ' + binary_to_hex(self.magic)
- print indstr + indent + 'Command: ' + self.cmd
- print indstr + indent + 'Payload: ' + str(len(self.payload.serialize())) + ' bytes'
- self.payload.pprint(nIndent+1)
-
-
-################################################################################
-class PyNetAddress(object):
-
- def __init__(self, time=-1, svcs='0'*16, netaddrObj=[], port=-1):
- """
- For our client we will ALWAYS use svcs=0 (NODE_NETWORK=0)
-
- time is stored as a unix timestamp
- services is stored as a bitset -- a string of 16 '0's or '1's
- addrObj is stored as a list/tuple of four UINT8s
- port is a regular old port number...
- """
- self.time = time
- self.services = svcs
- self.addrQuad = parseNetAddress(netaddrObj)
- self.port = port
-
- def unserialize(self, toUnpack, hasTimeField=True):
- if isinstance(toUnpack, BinaryUnpacker):
- addrData = toUnpack
- else:
- addrData = BinaryUnpacker( toUnpack )
-
- if hasTimeField:
- self.time = addrData.get(UINT32)
-
- self.services = addrData.get(UINT64)
- self.addrQuad = addrData.get(BINARY_CHUNK,16)[-4:]
- self.port = addrData.get(UINT16, endianness=NETWORKENDIAN)
-
- self.services = int_to_bitset(self.services)
- self.addrQuad = binary_to_quad(self.addrQuad)
- return self
-
- def serialize(self, withTimeField=True):
- bp = BinaryPacker()
- if withTimeField:
- bp.put(UINT32, self.time)
- bp.put(UINT64, bitset_to_int(self.services))
- bp.put(BINARY_CHUNK, quad_to_binary(self.addrQuad).rjust(16,'\x00'))
- bp.put(UINT16, self.port, endianness=NETWORKENDIAN)
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Network-Address:',
- print indstr + indent + 'Time: ' + unixTimeToFormatStr(self.time)
- print indstr + indent + 'Svcs: ' + self.services
- print indstr + indent + 'IPv4: ' + quad_to_str(self.addrQuad)
- print indstr + indent + 'Port: ' + self.port
-
- def pprintShort(self):
- print quad_to_str(self.addrQuad) + ':' + str(self.port)
-
-################################################################################
-################################################################################
-class PayloadAddr(object):
-
- command = 'addr'
-
- def __init__(self, addrList=[]):
- self.addrList = addrList # PyNetAddress objs
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- addrData = toUnpack
- else:
- addrData = BinaryUnpacker( toUnpack )
-
- self.addrList = []
- naddr = addrData.get(VAR_INT)
- for i in range(naddr):
- self.addrList.append( PyNetAddress().unserialize(addrData) )
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(VAR_INT, len(self.addrList))
- for netaddr in self.addrList:
- bp.put(BINARY_CHUNK, netaddr.serialize(), width=30)
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(addr):',
- for a in self.addrList:
- a.pprintShort()
-
- def pprintShort(self):
- for a in self.addrList:
- print '[' + quad_to_str(a.pprintShort()) + '], '
-
-################################################################################
-################################################################################
-class PayloadPing(object):
- """
- All payload objects have a serialize and unserialize method, making them
- easy to attach to PyMessage objects
- """
- command = 'ping'
-
- def __init__(self):
- pass
-
- def unserialize(self, toUnpack):
- return self
-
- def serialize(self):
- return ''
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(ping)'
-
-
-
-################################################################################
-################################################################################
-class PayloadVersion(object):
-
- command = 'version'
-
- def __init__(self, version=0, svcs='0'*16, tstamp=-1, addrRcv=PyNetAddress(), \
- addrFrm=PyNetAddress(), nonce=-1, sub=-1, height=-1):
- self.version = version
- self.services = svcs
- self.time = tstamp
- self.addrRecv = addrRcv
- self.addrFrom = addrFrm
- self.nonce = nonce
- self.subver = sub
- self.height0 = height
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- verData = toUnpack
- else:
- verData = BinaryUnpacker( toUnpack )
-
- self.version = verData.get(INT32)
- self.services = int_to_bitset(verData.get(UINT64), widthBytes=8)
- self.time = verData.get(INT64)
- self.addrRecv = PyNetAddress().unserialize(verData, hasTimeField=False)
- self.addrFrom = PyNetAddress().unserialize(verData, hasTimeField=False)
- self.nonce = verData.get(UINT64)
- self.subver = verData.get(VAR_STR)
- self.height0 = verData.get(INT32)
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(INT32, self.version )
- bp.put(UINT64, bitset_to_int(self.services))
- bp.put(INT64, self.time ) # todo, should this really be int64?
- bp.put(BINARY_CHUNK, self.addrRecv.serialize(withTimeField=False))
- bp.put(BINARY_CHUNK, self.addrFrom.serialize(withTimeField=False))
- bp.put(UINT64, self.nonce )
- bp.put(VAR_STR, self.subver )
- bp.put(INT32, self.height0 )
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(version):'
- print indstr + indent + 'Version: ' + str(self.version)
- print indstr + indent + 'Services: ' + self.services
- print indstr + indent + 'Time: ' + unixTimeToFormatStr(self.time)
- print indstr + indent + 'AddrTo: ',; self.addrRecv.pprintShort()
- print indstr + indent + 'AddrFrom:',; self.addrFrom.pprintShort()
- print indstr + indent + 'Nonce: ' + str(self.nonce)
- print indstr + indent + 'SubVer: ', self.subver
- print indstr + indent + 'StartHgt: ' + str(self.height0)
-
-################################################################################
-class PayloadVerack(object):
- """
- All payload objects have a serialize and unserialize method, making them
- easy to attach to PyMessage objects
- """
-
- command = 'verack'
-
- def __init__(self):
- pass
-
- def unserialize(self, toUnpack):
- return self
-
- def serialize(self):
- return ''
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(verack)'
-
-
-################################################################################
-################################################################################
-class PayloadInv(object):
- """
- All payload objects have a serialize and unserialize method, making them
- easy to attach to PyMessage objects
- """
-
- command = 'inv'
-
- def __init__(self):
- self.invList = [] # list of (type, hash) pairs
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- invData = toUnpack
- else:
- invData = BinaryUnpacker( toUnpack )
-
- numInv = invData.get(VAR_INT)
- for i in range(numInv):
- invType = invData.get(UINT32)
- invHash = invData.get(BINARY_CHUNK, 32)
- self.invList.append( [invType, invHash] )
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(VAR_INT, len(self.invList))
- for inv in self.invList:
- bp.put(UINT32, inv[0])
- bp.put(BINARY_CHUNK, inv[1], width=32)
- return bp.getBinaryString()
-
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(inv):'
- for inv in self.invList:
- print indstr + indent + ('BLOCK: ' if inv[0]==2 else 'TX : ') + \
- binary_to_hex(inv[1])
-
-
-
-################################################################################
-################################################################################
-class PayloadGetData(object):
- """
- All payload objects have a serialize and unserialize method, making them
- easy to attach to PyMessage objects
- """
-
- command = 'getdata'
-
- def __init__(self, invList=[]):
- if invList:
- self.invList = invList
- else:
- self.invList = []
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- invData = toUnpack
- else:
- invData = BinaryUnpacker( toUnpack )
-
- numInv = invData.get(VAR_INT)
- for i in range(numInv):
- invType = invData.get(UINT32)
- invHash = invData.get(BINARY_CHUNK, 32)
- self.invList.append( [invType, invHash] )
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(VAR_INT, len(self.invList))
- for inv in self.invList:
- bp.put(UINT32, inv[0])
- bp.put(BINARY_CHUNK, inv[1], width=32)
- return bp.getBinaryString()
-
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(getdata):'
- for inv in self.invList:
- print indstr + indent + ('BLOCK: ' if inv[0]==2 else 'TX : ') + \
- binary_to_hex(inv[1])
-
-
-################################################################################
-################################################################################
-class PayloadGetHeaders(object):
- command = 'getheaders'
-
- def __init__(self, hashStartList=[], hashStop=''):
- self.version = 1
- self.hashList = hashStartList
- self.hashStop = hashStop
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- ghData = toUnpack
- else:
- ghData = BinaryUnpacker( toUnpack )
-
- self.version = gbData.get(UINT32)
- nhash = ghData.get(VAR_INT)
- for i in range(nhash):
- self.hashList.append(ghData.get(BINARY_CHUNK, 32))
- self.hashStop = ghData.get(BINARY_CHUNK, 32)
- return self
-
- def serialize(self):
- nhash = len(self.hashList)
- bp = BinaryPacker()
- bp.put(UINT32, self.version)
- bp.put(VAR_INT, nhash)
- for i in range(nhash):
- bp.put(BINARY_CHUNK, self.hashList[i], width=32)
- bp.put(BINARY_CHUNK, self.hashStop, width=32)
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(getheaders):'
- print indstr + indent + 'HashList(s) :' + binary_to_hex(self.hashList[0])
- for i in range(1,len(self.hashList)):
- print indstr + indent + ' :' + binary_to_hex(self.hashList[i])
- print indstr + indent + 'HashStop :' + binary_to_hex(self.hashStop)
-
-
-
-################################################################################
-################################################################################
-class PayloadGetBlocks(object):
- command = 'getblocks'
-
- def __init__(self, version=1, startCt=-1, hashStartList=[], hashStop=''):
- self.version = 1
- self.hashList = hashStartList
- self.hashStop = hashStop
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- gbData = toUnpack
- else:
- gbData = BinaryUnpacker( toUnpack )
-
- self.version = gbData.get(UINT32)
- nhash = gbData.get(VAR_INT)
- for i in range(nhash):
- self.hashList.append(gbData.get(BINARY_CHUNK, 32))
- self.hashStop = gbData.get(BINARY_CHUNK, 32)
- return self
-
- def serialize(self):
- nhash = len(self.hashList)
- bp = BinaryPacker()
- bp.put(UINT32, self.version)
- bp.put(VAR_INT, nhash)
- for i in range(nhash):
- bp.put(BINARY_CHUNK, self.hashList[i], width=32)
- bp.put(BINARY_CHUNK, self.hashList, width=32)
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(getheaders):'
- print indstr + indent + 'Version :' + str(self.version)
- print indstr + indent + 'HashList(s) :' + binary_to_hex(self.hashList[0])
- for i in range(1,len(self.hashList)):
- print indstr + indent + ' :' + binary_to_hex(self.hashList[i])
- print indstr + indent + 'HashStop :' + binary_to_hex(self.hashStop)
-
-
-################################################################################
-################################################################################
-class PayloadTx(object):
- command = 'tx'
-
- def __init__(self, tx=PyTx()):
- self.tx = tx
-
- def unserialize(self, toUnpack):
- self.tx.unserialize(toUnpack)
- return self
-
- def serialize(self):
- return self.tx.serialize()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(tx):'
- self.tx.pprint(nIndent+1)
-
-
-################################################################################
-################################################################################
-class PayloadHeaders(object):
- command = 'headers'
-
- def __init__(self, header=PyBlockHeader(), headerlist=[]):
- self.header = header
- self.headerList = headerlist
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- headerData = toUnpack
- else:
- headerData = BinaryUnpacker( toUnpack )
-
- self.headerList = []
- self.header.unserialize(headerData)
- numHeader = headerData.get(VAR_INT)
- for i in range(numHeader):
- self.headerList.append(PyBlockHeader().unserialize(headerData))
- headerData.get(VAR_INT) # Not sure if this is even used, ever
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(BINARY_CHUNK, self.header.serialize())
- bp.put(VAR_INT, len(self.headerList))
- for header in self.headerList:
- bp.put(BINARY_CHUNK, header.serialize())
- bp.put(VAR_INT, 0)
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(headers):'
- self.header.pprint(nIndent+1)
- for header in self.headerList:
- print indstr + indent + 'Header:', header.getHash()
-
-
-################################################################################
-################################################################################
-class PayloadBlock(object):
- command = 'block'
-
- def __init__(self, header=PyBlockHeader(), txlist=[]):
- self.header = header
- self.txList = txlist
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- blkData = toUnpack
- else:
- blkData = BinaryUnpacker( toUnpack )
-
- self.txList = []
- self.header.unserialize(blkData)
- numTx = blkData.get(VAR_INT)
- for i in range(numTx):
- self.txList.append(PyTx().unserialize(blkData))
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- bp.put(BINARY_CHUNK, self.header.serialize())
- bp.put(VAR_INT, len(self.txList))
- for tx in self.txList:
- bp.put(BINARY_CHUNK, tx.serialize())
- return bp.getBinaryString()
-
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print ''
- print indstr + 'Message(block):'
- self.header.pprint(nIndent+1)
- for tx in self.txList:
- print indstr + indent + 'Tx:', tx.getHashHex()
-
-
-################################################################################
-class PayloadAlert(object):
- command = 'alert'
-
- def __init__(self):
- self.version = 1
- self.relayUntil = 0
- self.expiration = 0
- self.uniqueID = 0
- self.cancelVal = 0
- self.cancelSet = []
- self.minVersion = 0
- self.maxVersion = 0
- self.subVerSet = []
- self.comment = ''
- self.statusBar = ''
- self.reserved = ''
- self.signature = ''
-
-
- def unserialize(self, toUnpack):
- if isinstance(toUnpack, BinaryUnpacker):
- blkData = toUnpack
- else:
- blkData = BinaryUnpacker( toUnpack )
-
- return self
-
- def serialize(self):
- bp = BinaryPacker()
- return bp.getBinaryString()
-
-
- def pprint(self, nIndent=0):
- print nIndent*'\t' + 'ALERT(...)'
-
-################################################################################
-# Use this map to figure out which object to serialize/unserialize from a cmd
-PayloadMap = {
- 'ping': PayloadPing,
- 'tx': PayloadTx,
- 'inv': PayloadInv,
- 'version': PayloadVersion,
- 'verack': PayloadVerack,
- 'addr': PayloadAddr,
- 'getdata': PayloadGetData,
- 'getheaders': PayloadGetHeaders,
- 'getblocks': PayloadGetBlocks,
- 'block': PayloadBlock,
- 'headers': PayloadHeaders,
- 'alert': PayloadAlert }
-
-
-
-
-
-try:
- from twisted.internet.protocol import Protocol, ReconnectingClientFactory
- from twisted.internet.defer import Deferred
-except ImportError:
- LOGERROR('***Python-Twisted is not installed -- cannot enable')
- LOGERROR(' networking-related methods for ArmoryEngine' )
-
-
-################################################################################
-def forceDeferred(callbk):
- if callbk:
- if isinstance(callbk, Deferred):
- return callbk
- else:
- d = Deferred()
- d.addCallback(callbk)
-
-
-################################################################################
-# It seems we need to do this frequently when downloading headers & blocks
-# This only returns a list of numbers, but one list-comprehension to get hashes
-def createBlockLocatorNumList(topblk):
- blockNumList = []
- n,step,niter = topblk,1,0
- while n>0:
- blockNumList.append(n)
- if niter >= 10:
- step *= 2
- n -= step
- niter += 1
- blockNumList.append(0)
- return blockNumList
-
-################################################################################
-#
-# Armory Networking:
-#
-# This is where I will define all the network operations needed for
-# Armory to operate, using python-twisted. There are "better"
-# ways to do this with "reusable" code structures (i.e. using huge
-# deferred callback chains), but this is not the central "creative"
-# part of the Bitcoin protocol. I need just enough to broadcast tx
-# and receive new tx that aren't in the blockchain yet. Beyond that,
-# I'll just be ignoring everything else.
-#
-################################################################################
-class ArmoryClient(Protocol):
- """
- This is where all the Bitcoin-specific networking stuff goes.
- In the Twisted way, you need to inject your own chains of
- callbacks through the factory in order to get this class to do
- the right thing on the various events.
- """
-
- ############################################################
- def __init__(self):
- self.recvData = ''
- self.handshakeFinished = False
- self.sentHeadersReq = True
- self.peer = []
-
- ############################################################
- def connectionMade(self):
- """
- Construct the initial version message and send it right away.
- Everything else will be handled by dataReceived.
- """
- LOGINFO('Connection initiated. Start handshake')
- addrTo = str_to_quad(self.transport.getPeer().host)
- portTo = self.transport.getPeer().port
- addrFrom = str_to_quad(self.transport.getHost().host)
- portFrom = self.transport.getHost().port
-
- self.peer = [addrTo, portTo]
-
- services = '0'*16
- msgVersion = PayloadVersion()
- msgVersion.version = 40000 # TODO: this is what my Satoshi client says
- msgVersion.services = services
- msgVersion.time = long(RightNow())
- msgVersion.addrRecv = PyNetAddress(0, services, addrTo, portTo )
- msgVersion.addrFrom = PyNetAddress(0, services, addrFrom, portFrom)
- msgVersion.nonce = random.randint(2**60, 2**64-1)
- msgVersion.subver = 'Armory:%s' % getVersionString(BTCARMORY_VERSION)
- msgVersion.height0 = -1
- self.sendMessage( msgVersion )
- self.factory.func_madeConnect()
-
-
- ############################################################
- def dataReceived(self, data):
- """
- Called by the reactor when data is received over the connection.
- This method will do nothing if we don't receive a full message.
- """
-
-
- #print '\n\nData Received:',
- #pprintHex(binary_to_hex(data), withAddr=False)
-
- # Put the current buffer into an unpacker, process until empty
- self.recvData += data
- buf = BinaryUnpacker(self.recvData)
-
- messages = []
- while True:
- try:
- # recvData is only modified if the unserialize succeeds
- # Had a serious issue with references, so I had to convert
- # messages to strings to guarantee that copies were being
- # made! (yes, hacky...)
- thisMsg = PyMessage().unserialize(buf)
- messages.append( thisMsg.serialize() )
- self.recvData = buf.getRemainingString()
- except NetworkIDError:
- LOGERROR('Message for a different network!' )
- if BLOCKCHAINS.has_key(self.recvData[:4]):
- LOGERROR( '(for network: %s)', BLOCKCHAINS[self.recvData[:4]])
- # Before raising the error, we should've finished reading the msg
- # So pop it off the front of the buffer
- self.recvData = buf.getRemainingString()
- return
- except UnpackerError:
- # Expect this error when buffer isn't full enough for a whole msg
- break
-
- # We might've gotten here without anything to process -- if so, bail
- if len(messages)==0:
- return
-
-
- # Finally, we have some message to process, let's do it
- for msgStr in messages:
- msg = PyMessage().unserialize(msgStr)
- cmd = msg.cmd
-
- # Log the message if netlog option
- if CLI_OPTIONS.netlog:
- LOGDEBUG( 'DataReceived: %s', msg.payload.command)
- if msg.payload.command == 'tx':
- LOGDEBUG('\t' + binary_to_hex(msg.payload.tx.thisHash))
- elif msg.payload.command == 'block':
- LOGDEBUG('\t' + msg.payload.header.getHashHex())
- elif msg.payload.command == 'inv':
- for inv in msg.payload.invList:
- LOGDEBUG(('\tBLOCK: ' if inv[0]==2 else '\tTX : ') + \
- binary_to_hex(inv[1]))
-
-
- # We process version and verackk regardless of handshakeFinished
- if cmd=='version' and not self.handshakeFinished:
- self.peerInfo = {}
- self.peerInfo['version'] = msg.payload.version
- self.peerInfo['subver'] = msg.payload.subver
- self.peerInfo['time'] = msg.payload.time
- self.peerInfo['height'] = msg.payload.height0
- LOGINFO('Received version message from peer:')
- LOGINFO(' Version: %s', str(self.peerInfo['version']))
- LOGINFO(' SubVersion: %s', str(self.peerInfo['subver']))
- LOGINFO(' TimeStamp: %s', str(self.peerInfo['time']))
- LOGINFO(' StartHeight: %s', str(self.peerInfo['height']))
- self.sendMessage( PayloadVerack() )
- elif cmd=='verack':
- self.handshakeFinished = True
- self.factory.handshakeFinished(self)
- #self.startHeaderDL()
-
- ####################################################################
- # Don't process any other messages unless the handshake is finished
- if self.handshakeFinished:
- self.processMessage(msg)
-
-
- ############################################################
- #def connectionLost(self, reason):
- #"""
- #Try to reopen connection (not impl yet)
- #"""
- #self.factory.connectionFailed(self, reason)
-
-
- ############################################################
- def processMessage(self, msg):
- # TODO: when I start expanding this class to be more versatile,
- # I'll consider chaining/setting callbacks from the calling
- # application. For now, it's pretty static.
- #msg.payload.pprint(nIndent=2)
- if msg.cmd=='inv':
- invobj = msg.payload
- getdataMsg = PyMessage('getdata')
- for inv in invobj.invList:
- if inv[0]==MSG_INV_BLOCK:
- if TheBDM.getBDMState()=='Scanning' or \
- TheBDM.hasHeaderWithHash(inv[1]):
- continue
- getdataMsg.payload.invList.append(inv)
- if inv[0]==MSG_INV_TX:
- if TheBDM.getBDMState()=='Scanning' or \
- TheBDM.hasTxWithHash(inv[1]):
- continue
- getdataMsg.payload.invList.append(inv)
-
- # Now send the full request
- if not TheBDM.getBDMState()=='Scanning':
- self.sendMessage(getdataMsg)
-
- if msg.cmd=='tx':
- pytx = msg.payload.tx
- self.factory.func_newTx(pytx)
- if msg.cmd=='block':
- pyHeader = msg.payload.header
- pyTxList = msg.payload.txList
- LOGINFO('Received new block. %s', binary_to_hex(pyHeader.getHash(), BIGENDIAN))
- self.factory.func_newBlock(pyHeader, pyTxList)
-
-
-
- ############################################################
- def startHeaderDL(self):
- numList = self.createBlockLocatorNumList(self.topBlk)
- msg = PyMessage('getheaders')
- msg.payload.version = 1
- msg.payload.hashList = [getHeaderByHeight(i).getHash() for i in numList]
- msg.payload.hashStop = '\x00'*32
-
- self.sentHeadersReq = True
-
-
-
- ############################################################
- def startBlockDL(self):
- numList = self.createBlockLocatorNumList(self.topBlk)
- msg = PyMessage('getblocks')
- msg.payload.version = 1
- msg.payload.hashList = [getHeaderByHeight(i).getHash() for i in numList]
- msg.payload.hashStop = '\x00'*32
-
-
- ############################################################
- def sendMessage(self, msg):
- """
- Must pass in a PyMessage, or one of the Payload types, which
- will be converted to a PyMessage -- and then sent to the peer.
- If you have a fully-serialized message (with header) already,
- easy enough to user PyMessage().unserialize(binMsg)
- """
-
- if isinstance(msg, PyMessage):
- #print '\n\nSending Message:', msg.payload.command.upper()
- #pprintHex(binary_to_hex(msg.serialize()), indent=' ')
- if CLI_OPTIONS.netlog:
- LOGDEBUG( 'SendMessage: %s', msg.payload.command)
- LOGRAWDATA( msg.serialize() )
- self.transport.write(msg.serialize())
- else:
- msg = PyMessage(payload=msg)
- #print '\n\nSending Message:', msg.payload.command.upper()
- #pprintHex(binary_to_hex(msg.serialize()), indent=' ')
- if CLI_OPTIONS.netlog:
- LOGDEBUG( 'SendMessage: %s', msg.payload.command)
- LOGRAWDATA( msg.serialize() )
- self.transport.write(msg.serialize())
-
-
- ############################################################
- def sendTx(self, txObj):
- """
- This is a convenience method for the special case of sending
- a locally-constructed transaction. Pass in either a PyTx
- object, or a binary serialized tx. It will be converted to
- a PyMessage and forwarded to our peer(s)
- """
- LOGINFO('sendTx called...')
- if isinstance(txObj, PyMessage):
- self.sendMessage( txObj )
- elif isinstance(txObj, PyTx):
- self.sendMessage( PayloadTx(txObj))
- elif isinstance(txObj, str):
- self.sendMessage( PayloadTx(PyTx().unserialize(txObj)) )
-
-
-
-
-
-
-
-
-################################################################################
-################################################################################
-class ArmoryClientFactory(ReconnectingClientFactory):
- """
- Spawns Protocol objects used for communicating over the socket. All such
- objects (ArmoryClients) can share information through this factory.
- However, at the moment, this class is designed to only create a single
- connection -- to localhost.
- """
- protocol = ArmoryClient
- lastAlert = 0
-
- #############################################################################
- def __init__(self, \
- def_handshake=None, \
- func_loseConnect=(lambda: None), \
- func_madeConnect=(lambda: None), \
- func_newTx=(lambda x: None), \
- func_newBlock=(lambda x,y: None)):
- """
- Initialize the ReconnectingClientFactory with a deferred for when the handshake
- finishes: there should be only one handshake, and thus one firing
- of the handshake-finished callback
- """
- self.lastAlert = 0
- self.deferred_handshake = forceDeferred(def_handshake)
- self.fileMemPool = os.path.join(ARMORY_HOME_DIR, 'mempool.bin')
-
- # All other methods will be regular callbacks: we plan to have a very
- # static set of behaviors for each message type
- # (NOTE: The logic for what I need right now is so simple, that
- # I finished implementing it in a few lines of code. When I
- # need to expand the versatility of this class, I'll start
- # doing more OOP/deferreds/etc
- self.func_loseConnect = func_loseConnect
- self.func_madeConnect = func_madeConnect
- self.func_newTx = func_newTx
- self.func_newBlock = func_newBlock
- self.proto = None
-
-
-
- #############################################################################
- def addTxToMemoryPool(self, pytx):
- if not TheBDM.getBDMState()=='Offline':
- TheBDM.addNewZeroConfTx(pytx.serialize(), long(RightNow()), True)
-
-
-
- #############################################################################
- def handshakeFinished(self, protoObj):
- LOGINFO('Handshake finished, connection open!')
- self.proto = protoObj
- if self.deferred_handshake:
- d, self.deferred_handshake = self.deferred_handshake, None
- d.callback(protoObj)
-
-
-
- #############################################################################
- def clientConnectionLost(self, connector, reason):
- LOGERROR('***Connection to Satoshi client LOST! Attempting to reconnect...')
- self.func_loseConnect()
- ReconnectingClientFactory.clientConnectionLost(self,connector,reason)
-
-
-
- #############################################################################
- def connectionFailed(self, protoObj, reason):
- """
- This method needs some serious work... I don't quite know yet how
- to reopen the connection... and I'll need to copy the Deferred so
- that it is ready for the next connection failure
- """
- LOGERROR('***Initial connection to Satoshi client failed! Retrying...')
- ReconnectingClientFactory.connectionFailed(self, protoObj, reason)
-
-
-
-
- #############################################################################
- def sendTx(self, pytxObj):
- if self.proto:
- self.proto.sendTx(pytxObj)
- else:
- raise ConnectionError, 'Connection to localhost DNE.'
-
-
- #############################################################################
- def sendMessage(self, msgObj):
- if self.proto:
- self.proto.sendMessage(msgObj)
- else:
- raise ConnectionError, 'Connection to localhost DNE.'
-
-
-
-
-class FakeClientFactory(ReconnectingClientFactory):
- """
- A fake class that has the same methods as an ArmoryClientFactory,
- but doesn't do anything. If there is no internet, then we want
- to be able to use the same calls
- """
- #############################################################################
- def __init__(self, \
- def_handshake=None, \
- func_loseConnect=(lambda: None), \
- func_madeConnect=(lambda: None), \
- func_newTx=(lambda x: None), \
- func_newBlock=(lambda x,y: None)): pass
- def addTxToMemoryPool(self, pytx): pass
- def handshakeFinished(self, protoObj): pass
- def clientConnectionLost(self, connector, reason): pass
- def connectionFailed(self, protoObj, reason): pass
- def sendTx(self, pytxObj): pass
-
-
-
-
-
-#############################################################################
-import socket
-def satoshiIsAvailable(host='127.0.0.1', port=BITCOIN_PORT, timeout=0.01):
-
- if not isinstance(port, (list,tuple)):
- port = [port]
-
- for p in port:
- s = socket.socket()
- s.settimeout(timeout) # Most of the time checking localhost -- FAST
- try:
- s.connect((host, p))
- s.close()
- return p
- except:
- pass
-
- return 0
-
-
-################################################################################
-def extractSignedDataFromVersionsDotTxt(wholeFile, doVerify=True):
- """
- This method returns a pair: a dictionary to lookup link by OS, and
- a formatted string that is sorted by OS, and re-formatted list that
- will hash the same regardless of original format or ordering
- """
-
- msgBegin = wholeFile.find('# -----BEGIN-SIGNED-DATA-')
- msgBegin = wholeFile.find('\n', msgBegin+1) + 1
- msgEnd = wholeFile.find('# -----SIGNATURE---------')
- sigBegin = wholeFile.find('\n', msgEnd+1) + 3
- sigEnd = wholeFile.find('# -----END-SIGNED-DATA---')
-
- MSGRAW = wholeFile[msgBegin:msgEnd]
- SIGHEX = wholeFile[sigBegin:sigEnd].strip()
-
- if -1 in [msgBegin,msgEnd,sigBegin,sigEnd]:
- LOGERROR('No signed data block found')
- return ''
-
-
- if doVerify:
- Pub = SecureBinaryData(hex_to_binary(ARMORY_INFO_SIGN_PUBLICKEY))
- Msg = SecureBinaryData(MSGRAW)
- Sig = SecureBinaryData(hex_to_binary(SIGHEX))
- isVerified = CryptoECDSA().VerifyData(Msg, Sig, Pub)
-
- if not isVerified:
- LOGERROR('Signed data block failed verification!')
- return ''
- else:
- LOGINFO('Signature on signed data block is GOOD!')
-
- return MSGRAW
-
-
-################################################################################
-def parseLinkList(theData):
- """
- Plug the verified data into here...
- """
- DLDICT,VERDICT = {},{}
- sectStr = None
- for line in theData.split('\n'):
- pcs = line[1:].split()
- if line.startswith('# SECTION-') and 'INSTALLERS' in line:
- sectStr = pcs[0].split('-')[-1]
- if not sectStr in DLDICT:
- DLDICT[sectStr] = {}
- VERDICT[sectStr] = ''
- if len(pcs)>1:
- VERDICT[sectStr] = pcs[-1]
- continue
-
- if len(pcs)==3 and pcs[1].startswith('http'):
- DLDICT[sectStr][pcs[0]] = pcs[1:]
-
- return DLDICT,VERDICT
-
-
-
-
-
-################################################################################
-# jgarzik'sjj jsonrpc-bitcoin code -- stupid-easy to talk to bitcoind
-from jsonrpc import ServiceProxy, authproxy
-class SatoshiDaemonManager(object):
- """
- Use an existing implementation of bitcoind
- """
-
- class BitcoindError(Exception): pass
- class BitcoindNotAvailableError(Exception): pass
- class BitcoinDotConfError(Exception): pass
- class SatoshiHomeDirDNE(Exception): pass
- class ConfigFileUserDNE(Exception): pass
- class ConfigFilePwdDNE(Exception): pass
-
-
- #############################################################################
- def __init__(self):
- self.executable = None
- self.satoshiHome = None
- self.bitconf = {}
- self.proxy = None
- self.bitcoind = None
- self.isMidQuery = False
- self.last20queries = []
- self.disabled = False
- self.failedFindExe = False
- self.failedFindHome = False
- self.foundExe = []
- self.circBufferState = []
- self.circBufferTime = []
- self.btcOut = None
- self.btcErr = None
- self.lastTopBlockInfo = { \
- 'numblks': -1,
- 'tophash': '',
- 'toptime': -1,
- 'error': 'Uninitialized',
- 'blkspersec': -1 }
-
-
-
- #############################################################################
- def setupSDM(self, pathToBitcoindExe=None, satoshiHome=BTC_HOME_DIR, \
- extraExeSearch=[], createHomeIfDNE=True):
- LOGDEBUG('Exec setupSDM')
- self.failedFindExe = False
- self.failedFindHome = False
- # If we are supplied a path, then ignore the extra exe search paths
- if pathToBitcoindExe==None:
- pathToBitcoindExe = self.findBitcoind(extraExeSearch)
- if len(pathToBitcoindExe)==0:
- LOGDEBUG('Failed to find bitcoind')
- self.failedFindExe = True
- else:
- LOGINFO('Found bitcoind in the following places:')
- for p in pathToBitcoindExe:
- LOGINFO(' %s', p)
- pathToBitcoindExe = pathToBitcoindExe[0]
- LOGINFO('Using: %s', pathToBitcoindExe)
-
- if not os.path.exists(pathToBitcoindExe):
- LOGINFO('Somehow failed to find exe even after finding it...?')
- self.failedFindExe = True
-
- self.executable = pathToBitcoindExe
-
- if not os.path.exists(satoshiHome):
- if createHomeIfDNE:
- LOGINFO('Making satoshi home dir')
- os.makedirs(satoshiHome)
- else:
- LOGINFO('No home dir, makedir not requested')
- self.failedFindHome = True
-
- if self.failedFindExe: raise self.BitcoindError, 'bitcoind not found'
- if self.failedFindHome: raise self.BitcoindError, 'homedir not found'
-
- self.satoshiHome = satoshiHome
- self.disabled = False
- self.proxy = None
- self.bitcoind = None # this will be a Popen object
- self.isMidQuery = False
- self.last20queries = []
-
- self.readBitcoinConf(makeIfDNE=True)
-
-
-
-
-
- #############################################################################
- def setDisabled(self, newBool=True):
- s = self.getSDMState()
-
- if newBool==True:
- if s in ('BitcoindInitializing', 'BitcoindSynchronizing', 'BitcoindReady'):
- self.stopBitcoind()
-
- self.disabled = newBool
-
-
- #############################################################################
- def getAllFoundExe(self):
- return list(self.foundExe)
-
-
- #############################################################################
- def findBitcoind(self, extraSearchPaths=[]):
- self.foundExe = []
-
- searchPaths = list(extraSearchPaths) # create a copy
-
- if OS_WINDOWS:
- # First check desktop for links
- possBaseDir = []
- home = os.path.expanduser('~')
- desktop = os.path.join(home, 'Desktop')
-
- if os.path.exists(desktop):
- dtopfiles = os.listdir(desktop)
- for path in [os.path.join(desktop, fn) for fn in dtopfiles]:
- if 'bitcoin' in path.lower() and path.lower().endswith('.lnk'):
- import win32com.client
- shell = win32com.client.Dispatch('WScript.Shell')
- targ = shell.CreateShortCut(path).Targetpath
- targDir = os.path.dirname(targ)
- LOGINFO('Found Bitcoin-Qt link on desktop: %s', targDir)
- possBaseDir.append( targDir )
-
- # Also look in default place in ProgramFiles dirs
- possBaseDir.append(os.getenv('PROGRAMFILES'))
- if SystemSpecs.IsX64:
- possBaseDir.append(os.getenv('PROGRAMFILES(X86)'))
-
-
- # Now look at a few subdirs of the
- searchPaths.extend(possBaseDir)
- searchPaths.extend([os.path.join(p, 'Bitcoin', 'daemon') for p in possBaseDir])
- searchPaths.extend([os.path.join(p, 'daemon') for p in possBaseDir])
- searchPaths.extend([os.path.join(p, 'Bitcoin') for p in possBaseDir])
-
- for p in searchPaths:
- testPath = os.path.join(p, 'bitcoind.exe')
- if os.path.exists(testPath):
- self.foundExe.append(testPath)
-
- else:
- # In case this was a downloaded copy, make sure we traverse to bin/64 dir
- if SystemSpecs.IsX64:
- searchPaths.extend([os.path.join(p, 'bin/64') for p in extraSearchPaths])
- else:
- searchPaths.extend([os.path.join(p, 'bin/32') for p in extraSearchPaths])
-
- searchPaths.extend(['/usr/bin/', '/usr/lib/bitcoin/'])
-
- for p in searchPaths:
- testPath = os.path.join(p, 'bitcoind')
- if os.path.exists(testPath):
- self.foundExe.append(testPath)
-
- try:
- locs = subprocess_check_output(['whereis','bitcoind']).split()
- if len(locs)>1:
- locs = filter(lambda x: os.path.basename(x)=='bitcoind', locs)
- LOGINFO('"whereis" returned: %s', str(locs))
- self.foundExe.extend(locs)
- except:
- LOGEXCEPT('Error executing "whereis" command')
-
-
- # For logging purposes, check that the first answer matches one of the
- # extra search paths. There should be some kind of notification that
- # their supplied search path was invalid and we are using something else.
- if len(self.foundExe)>0 and len(extraSearchPaths)>0:
- foundIt = False
- for p in extraSearchPaths:
- if self.foundExe[0].startswith(p):
- foundIt=True
-
- if not foundIt:
- LOGERROR('Bitcoind could not be found in the specified installation:')
- for p in extraSearchPaths:
- LOGERROR(' %s', p)
- LOGERROR('Bitcoind is being started from:')
- LOGERROR(' %s', self.foundExe[0])
-
- return self.foundExe
-
- #############################################################################
- def getGuardianPath(self):
- if OS_WINDOWS:
- armoryInstall = os.path.dirname(inspect.getsourcefile(SatoshiDaemonManager))
- # This should return a zip file because of py2exe
- if armoryInstall.endswith('.zip'):
- armoryInstall = os.path.dirname(armoryInstall)
- gpath = os.path.join(armoryInstall, 'guardian.exe')
- else:
- theDir = os.path.dirname(inspect.getsourcefile(SatoshiDaemonManager))
- gpath = os.path.join(theDir, 'guardian.py')
-
- if not os.path.exists(gpath):
- LOGERROR('Could not find guardian script: %s', gpath)
- raise FileExistsError
- return gpath
-
-
-
-
- #############################################################################
- def readBitcoinConf(self, makeIfDNE=False):
- LOGINFO('Reading bitcoin.conf file')
- bitconf = os.path.join( self.satoshiHome, 'bitcoin.conf' )
- if not os.path.exists(bitconf):
- if not makeIfDNE:
- raise self.BitcoinDotConfError, 'Could not find bitcoin.conf'
- else:
- LOGINFO('No bitcoin.conf available. Creating it...')
- touchFile(bitconf)
-
- # Guarantee that bitcoin.conf file has very strict permissions
- if OS_WINDOWS:
- if OS_VARIANT[0].lower()=='xp':
- LOGERROR('Cannot set permissions correctly in XP!')
- LOGERROR('Please confirm permissions on the following file ')
- LOGERROR('are set to exclusive access only for your user ')
- LOGERROR('(it usually is, but Armory cannot guarantee it ')
- LOGERROR('on XP systems):')
- LOGERROR(' %s', bitconf)
- else:
- LOGINFO('Setting permissions on bitcoin.conf')
- import win32api
- username = win32api.GetUserName()
- cmd_icacls = ['icacls',bitconf,'/inheritance:r','/grant:r', '%s:F' % username]
- icacls_out = subprocess_check_output(cmd_icacls, shell=True)
- LOGINFO('icacls returned: %s', icacls_out)
- else:
- LOGINFO('Setting permissions on bitcoin.conf')
- os.chmod(bitconf, stat.S_IRUSR | stat.S_IWUSR)
-
-
- with open(bitconf,'r') as f:
- # Find the last character of the each line: either a newline or '#'
- endchr = lambda line: line.find('#') if line.find('#')>1 else len(line)
-
- # Reduce each line to a list of key,value pairs separated with '='
- allconf = [l[:endchr(l)].strip().split('=') for l in f.readlines()]
-
- # Need to convert to (x[0],x[1:]) in case the password has '=' in it
- allconfPairs = [[x[0], '='.join(x[1:])] for x in allconf if len(x)>1]
-
- # Convert the list of pairs to a dictionary
- self.bitconf = dict(allconfPairs)
-
-
- # Look for rpcport, use default if not there
- self.bitconf['rpcport'] = int(self.bitconf.get('rpcport', BITCOIN_RPC_PORT))
-
- # We must have a username and password. If not, append to file
- if not self.bitconf.has_key('rpcuser'):
- LOGDEBUG('No rpcuser: creating one')
- with open(bitconf,'a') as f:
- f.write('\n')
- f.write('rpcuser=generated_by_armory\n')
- self.bitconf['rpcuser'] = 'generated_by_armory'
-
- if not self.bitconf.has_key('rpcpassword'):
- LOGDEBUG('No rpcpassword: creating one')
- with open(bitconf,'a') as f:
- randBase58 = SecureBinaryData().GenerateRandom(32).toBinStr()
- randBase58 = binary_to_base58(randBase58)
- f.write('\n')
- f.write('rpcpassword=%s' % randBase58)
- self.bitconf['rpcpassword'] = randBase58
-
-
- if not isASCII(self.bitconf['rpcuser']):
- LOGERROR('Non-ASCII character in bitcoin.conf (rpcuser)!')
- if not isASCII(self.bitconf['rpcpassword']):
- LOGERROR('Non-ASCII character in bitcoin.conf (rpcpassword)!')
-
- self.bitconf['host'] = '127.0.0.1'
-
-
-
- #############################################################################
- def startBitcoind(self):
- self.btcOut, self.btcErr = None,None
- if self.disabled:
- LOGERROR('SDM was disabled, must be re-enabled before starting')
- return
-
- LOGINFO('Called startBitcoind')
- import subprocess
-
- if self.isRunningBitcoind():
- raise self.BitcoindError, 'Looks like we have already started bitcoind'
-
- if not os.path.exists(self.executable):
- raise self.BitcoindError, 'Could not find bitcoind'
-
-
- pargs = [self.executable]
- if USE_TESTNET:
- pargs.append('-datadir=%s' % self.satoshiHome.rstrip('/testnet3/') )
- pargs.append('-testnet')
- else:
- pargs.append('-datadir=%s' % self.satoshiHome)
-
- try:
- # Don't want some strange error in this size-check to abort loading
- blocksdir = os.path.join(self.satoshiHome, 'blocks')
- sz = long(0)
- if os.path.exists(blocksdir):
- for fn in os.listdir(blocksdir):
- fnpath = os.path.join(blocksdir, fn)
- sz += long(os.path.getsize(fnpath))
-
- if sz < 5*GIGABYTE:
- if SystemSpecs.Memory>9.0:
- pargs.append('-dbcache=2000')
- elif SystemSpecs.Memory>5.0:
- pargs.append('-dbcache=1000')
- elif SystemSpecs.Memory>3.0:
- pargs.append('-dbcache=500')
- except:
- LOGEXCEPT('Failed size check of blocks directory')
-
-
- # Startup bitcoind and get its process ID (along with our own)
- self.bitcoind = launchProcess(pargs)
-
- self.btcdpid = self.bitcoind.pid
- self.selfpid = os.getpid()
-
- LOGINFO('PID of bitcoind: %d', self.btcdpid)
- LOGINFO('PID of armory: %d', self.selfpid)
-
- # Startup guardian process -- it will watch Armory's PID
- gpath = self.getGuardianPath()
- pargs = [gpath, str(self.selfpid), str(self.btcdpid)]
- if not OS_WINDOWS:
- pargs.insert(0, 'python')
- launchProcess(pargs)
-
-
-
- #############################################################################
- def stopBitcoind(self):
- LOGINFO('Called stopBitcoind')
- if not self.isRunningBitcoind():
- LOGINFO('...but bitcoind is not running, to be able to stop')
- return
-
- killProcessTree(self.bitcoind.pid)
- killProcess(self.bitcoind.pid)
-
- time.sleep(1)
- self.bitcoind = None
-
-
- #############################################################################
- def isRunningBitcoind(self):
- """
- armoryengine satoshiIsAvailable() only tells us whether there's a
- running bitcoind that is actively responding on its port. But it
- won't be responding immediately after we've started it (still doing
- startup operations). If bitcoind was started and still running,
- then poll() will return None. Any othe poll() return value means
- that the process terminated
- """
- if self.bitcoind==None:
- return False
- else:
- if not self.bitcoind.poll()==None:
- LOGDEBUG('Bitcoind is no more')
- if self.btcOut==None:
- self.btcOut, self.btcErr = self.bitcoind.communicate()
- LOGWARN('bitcoind exited, bitcoind STDOUT:')
- for line in self.btcOut.split('\n'):
- LOGWARN(line)
- LOGWARN('bitcoind exited, bitcoind STDERR:')
- for line in self.btcErr.split('\n'):
- LOGWARN(line)
- return self.bitcoind.poll()==None
-
- #############################################################################
- def wasRunningBitcoind(self):
- return (not self.bitcoind==None)
-
- #############################################################################
- def bitcoindIsResponsive(self):
- return satoshiIsAvailable(self.bitconf['host'], self.bitconf['rpcport'])
-
- #############################################################################
- def getSDMState(self):
- """
- As for why I'm doing this: it turns out that between "initializing"
- and "synchronizing", bitcoind temporarily stops responding entirely,
- which causes "not-available" to be the state. I need to smooth that
- out because it wreaks havoc on the GUI which will switch to showing
- a nasty error.
- """
-
- state = self.getSDMStateLogic()
- self.circBufferState.append(state)
- self.circBufferTime.append(RightNow())
- if len(self.circBufferTime)>2 and \
- (self.circBufferTime[-1] - self.circBufferTime[1]) > 5:
- # Only remove the first element if we have at least 5s history
- self.circBufferState = self.circBufferState[1:]
- self.circBufferTime = self.circBufferTime[1:]
-
- # Here's where we modify the output to smooth out the gap between
- # "initializing" and "synchronizing" (which is a couple seconds
- # of "not available"). "NotAvail" keeps getting added to the
- # buffer, but if it was "initializing" in the last 5 seconds,
- # we will keep "initializing"
- if state=='BitcoindNotAvailable':
- if 'BitcoindInitializing' in self.circBufferState:
- LOGWARN('Overriding not-available message. This should happen 0-5 times')
- return 'BitcoindInitializing'
-
- return state
-
- #############################################################################
- def getSDMStateLogic(self):
-
- if self.disabled:
- return 'BitcoindMgmtDisabled'
-
- if self.failedFindExe:
- return 'BitcoindExeMissing'
-
- if self.failedFindHome:
- return 'BitcoindHomeMissing'
-
- latestInfo = self.getTopBlockInfo()
-
- if self.bitcoind==None and latestInfo['error']=='Uninitialized':
- return 'BitcoindNeverStarted'
-
- if not self.isRunningBitcoind():
- # Not running at all: either never started, or process terminated
- if not self.btcErr==None and len(self.btcErr)>0:
- errstr = self.btcErr.replace(',',' ').replace('.',' ').replace('!',' ')
- errPcs = set([a.lower() for a in errstr.split()])
- runPcs = set(['cannot','obtain','lock','already','running'])
- dbePcs = set(['database', 'recover','backup','except','wallet','dat'])
- if len(errPcs.intersection(runPcs))>=(len(runPcs)-1):
- return 'BitcoindAlreadyRunning'
- elif len(errPcs.intersection(dbePcs))>=(len(dbePcs)-1):
- return 'BitcoindDatabaseEnvError'
- else:
- return 'BitcoindUnknownCrash'
- else:
- return 'BitcoindNotAvailable'
- elif not self.bitcoindIsResponsive():
- # Running but not responsive... must still be initializing
- return 'BitcoindInitializing'
- else:
- # If it's responsive, get the top block and check
- # TODO: These conditionals are based on experimental results. May
- # not be accurate what the specific errors mean...
- if latestInfo['error']=='ValueError':
- return 'BitcoindWrongPassword'
- elif latestInfo['error']=='JsonRpcException':
- return 'BitcoindInitializing'
- elif latestInfo['error']=='SocketError':
- return 'BitcoindNotAvailable'
-
- if 'BitcoindReady' in self.circBufferState:
- # If ready, always ready
- return 'BitcoindReady'
-
- # If we get here, bitcoind is gave us a response.
- secSinceLastBlk = RightNow() - latestInfo['toptime']
- blkspersec = latestInfo['blkspersec']
- #print 'Blocks per 10 sec:', ('UNKNOWN' if blkspersec==-1 else blkspersec*10)
- if secSinceLastBlk > 4*HOUR or blkspersec==-1:
- return 'BitcoindSynchronizing'
- else:
- if blkspersec*20 > 2 and not 'BitcoindReady' in self.circBufferState:
- return 'BitcoindSynchronizing'
- else:
- return 'BitcoindReady'
-
-
-
-
- #############################################################################
- def createProxy(self, forceNew=False):
- if self.proxy==None or forceNew:
- LOGDEBUG('Creating proxy')
- usr,pas,hst,prt = [self.bitconf[k] for k in ['rpcuser','rpcpassword',\
- 'host', 'rpcport']]
- pstr = 'http://%s:%s@%s:%d' % (usr,pas,hst,prt)
- LOGINFO('Creating proxy in SDM: host=%s, port=%s', hst,prt)
- self.proxy = ServiceProxy(pstr)
-
-
- #############################################################################
- def __backgroundRequestTopBlock(self):
- self.createProxy()
- self.isMidQuery = True
- tstart = RightNow()
- try:
- numblks = self.proxy.getinfo()['blocks']
- blkhash = self.proxy.getblockhash(numblks)
- toptime = self.proxy.getblock(blkhash)['time']
- #LOGDEBUG('RPC Call: numBlks=%d, toptime=%d', numblks, toptime)
- # Only overwrite once all outputs are retrieved
- self.lastTopBlockInfo['numblks'] = numblks
- self.lastTopBlockInfo['tophash'] = blkhash
- self.lastTopBlockInfo['toptime'] = toptime
- self.lastTopBlockInfo['error'] = None # Holds error info
-
- if len(self.last20queries)==0 or \
- (RightNow()-self.last20queries[-1][0]) > 0.99:
- # This conditional guarantees last 20 queries spans at least 20s
- self.last20queries.append([RightNow(), numblks])
- self.last20queries = self.last20queries[-20:]
- t0,b0 = self.last20queries[0]
- t1,b1 = self.last20queries[-1]
-
- # Need at least 10s of data to give meaning answer
- if (t1-t0)<10:
- self.lastTopBlockInfo['blkspersec'] = -1
- else:
- self.lastTopBlockInfo['blkspersec'] = float(b1-b0)/float(t1-t0)
-
- except ValueError:
- # I believe this happens when you used the wrong password
- LOGEXCEPT('ValueError in bkgd req top blk')
- self.lastTopBlockInfo['error'] = 'ValueError'
- except authproxy.JSONRPCException:
- # This seems to happen when bitcoind is overwhelmed... not quite ready
- LOGDEBUG('generic jsonrpc exception')
- self.lastTopBlockInfo['error'] = 'JsonRpcException'
- except socket.error:
- # Connection isn't available... is bitcoind not running anymore?
- LOGDEBUG('generic socket error')
- self.lastTopBlockInfo['error'] = 'SocketError'
- except:
- LOGEXCEPT('generic error')
- self.lastTopBlockInfo['error'] = 'UnknownError'
- raise
- finally:
- self.isMidQuery = False
-
-
- #############################################################################
- def updateTopBlockInfo(self):
- """
- We want to get the top block information, but if bitcoind is rigorously
- downloading and verifying the blockchain, it can sometimes take 10s to
- to respond to JSON-RPC calls! We must do it in the background...
-
- If it's already querying, no need to kick off another background request,
- just return the last value, which may be "stale" but we don't really
- care for this particular use-case
- """
- if not self.isRunningBitcoind():
- return
-
- if self.isMidQuery:
- return
-
- self.createProxy()
- self.queryThread = PyBackgroundThread(self.__backgroundRequestTopBlock)
- self.queryThread.start()
-
-
- #############################################################################
- def getTopBlockInfo(self):
- if self.isRunningBitcoind():
- self.updateTopBlockInfo()
- self.queryThread.join(0.001) # In most cases, result should come in 1 ms
- # We return a copy so that the data is not changing as we use it
-
- return self.lastTopBlockInfo.copy()
-
-
- #############################################################################
- def callJSON(self, func, *args):
- state = self.getSDMState()
- if not state in ('BitcoindReady', 'BitcoindSynchronizing'):
- LOGERROR('Called callJSON(%s, %s)', func, str(args))
- LOGERROR('Current SDM state: %s', state)
- raise self.BitcoindError, 'callJSON while %s'%state
-
- return self.proxy.__getattr__(func)(*args)
-
-
- #############################################################################
- def returnSDMInfo(self):
- sdminfo = {}
- for key,val in self.bitconf.iteritems():
- sdminfo['bitconf_%s'%key] = val
-
- for key,val in self.lastTopBlockInfo.iteritems():
- sdminfo['topblk_%s'%key] = val
-
- sdminfo['executable'] = self.executable
- sdminfo['isrunning'] = self.isRunningBitcoind()
- sdminfo['homedir'] = self.satoshiHome
- sdminfo['proxyinit'] = (not self.proxy==None)
- sdminfo['ismidquery'] = self.isMidQuery
- sdminfo['querycount'] = len(self.last20queries)
-
- return sdminfo
-
- #############################################################################
- def printSDMInfo(self):
- print '\nCurrent SDM State:'
- print '\t', 'SDM State Str'.ljust(20), ':', self.getSDMState()
- for key,value in self.returnSDMInfo().iteritems():
- print '\t', str(key).ljust(20), ':', str(value)
-
-
-
-################################################################################
-################################################################################
-class SettingsFile(object):
- """
- This class could be replaced by the built-in QSettings in PyQt, except
- that older versions of PyQt do not support the QSettings (or at least
- I never figured it out). Easy enough to do it here
-
- All settings must populated with a simple datatype -- non-simple
- datatypes should be broken down into pieces that are simple: numbers
- and strings, or lists/tuples of them.
-
- Will write all the settings to file. Each line will look like:
- SingleValueSetting1 | 3824.8
- SingleValueSetting2 | this is a string
- Tuple Or List Obj 1 | 12 $ 43 $ 13 $ 33
- Tuple Or List Obj 2 | str1 $ another str
- """
-
- #############################################################################
- def __init__(self, path=None):
- self.settingsPath = path
- self.settingsMap = {}
- if not path:
- self.settingsPath = os.path.join(ARMORY_HOME_DIR, 'ArmorySettings.txt')
-
- LOGINFO('Using settings file: %s', self.settingsPath)
- if os.path.exists(self.settingsPath):
- self.loadSettingsFile(path)
-
-
-
- #############################################################################
- def pprint(self, nIndent=0):
- indstr = indent*nIndent
- print indstr + 'Settings:'
- for k,v in self.settingsMap.iteritems():
- print indstr + indent + k.ljust(15), v
-
-
- #############################################################################
- def hasSetting(self, name):
- return self.settingsMap.has_key(name)
-
- #############################################################################
- def set(self, name, value):
- if isinstance(value, tuple):
- self.settingsMap[name] = list(value)
- else:
- self.settingsMap[name] = value
- self.writeSettingsFile()
-
- #############################################################################
- def extend(self, name, value):
- """ Adds/converts setting to list, appends value to the end of it """
- if not self.settingsMap.has_key(name):
- if isinstance(value, list):
- self.set(name, value)
- else:
- self.set(name, [value])
- else:
- origVal = self.get(name, expectList=True)
- if isinstance(value, list):
- origVal.extend(value)
- else:
- origVal.append(value)
- self.settingsMap[name] = origVal
- self.writeSettingsFile()
-
- #############################################################################
- def get(self, name, expectList=False):
- if not self.hasSetting(name) or self.settingsMap[name]=='':
- return ([] if expectList else '')
- else:
- val = self.settingsMap[name]
- if expectList:
- if isinstance(val, list):
- return val
- else:
- return [val]
- else:
- return val
-
- #############################################################################
- def getAllSettings(self):
- return self.settingsMap
-
- #############################################################################
- def getSettingOrSetDefault(self, name, defaultVal, expectList=False):
- output = defaultVal
- if self.hasSetting(name):
- output = self.get(name)
- else:
- self.set(name, defaultVal)
-
- return output
-
-
-
- #############################################################################
- def delete(self, name):
- if self.hasSetting(name):
- del self.settingsMap[name]
- self.writeSettingsFile()
-
- #############################################################################
- def writeSettingsFile(self, path=None):
- if not path:
- path = self.settingsPath
- f = open(path, 'w')
- for key,val in self.settingsMap.iteritems():
- try:
- # Skip anything that throws an exception
- valStr = ''
- if isinstance(val, basestring):
- valStr = val
- elif isinstance(val, int) or \
- isinstance(val, float) or \
- isinstance(val, long):
- valStr = str(val)
- elif isinstance(val, list) or \
- isinstance(val, tuple):
- valStr = ' $ '.join([str(v) for v in val])
- f.write(key.ljust(36))
- f.write(' | ')
- f.write(toBytes(valStr))
- f.write('\n')
- except:
- LOGEXCEPT('Invalid entry in SettingsFile... skipping')
- f.close()
-
-
- #############################################################################
- def loadSettingsFile(self, path=None):
- if not path:
- path = self.settingsPath
-
- if not os.path.exists(path):
- raise FileExistsError, 'Settings file DNE:', path
-
- f = open(path, 'rb')
- sdata = f.read()
- f.close()
-
- # Automatically convert settings to numeric if possible
- def castVal(v):
- v = v.strip()
- a,b = v.isdigit(), v.replace('.','').isdigit()
- if a:
- return int(v)
- elif b:
- return float(v)
- else:
- if v.lower()=='true':
- return True
- elif v.lower()=='false':
- return False
- else:
- return toUnicode(v)
-
-
- sdata = [line.strip() for line in sdata.split('\n')]
- for line in sdata:
- if len(line.strip())==0:
- continue
-
- try:
- key,vals = line.split('|')
- valList = [castVal(v) for v in vals.split('$')]
- if len(valList)==1:
- self.settingsMap[key.strip()] = valList[0]
- else:
- self.settingsMap[key.strip()] = valList
- except:
- LOGEXCEPT('Invalid setting in %s (skipping...)', path)
-
-
-
-
-
-################################################################################
-################################################################################
-# Read Satoshi Wallets (wallet.dat) to import into Armory wallet
-# BSDDB wallet-reading code taken from Joric's pywallet: he declared it
-# public domain.
-#try:
-# from bsddb.db import *
-#except ImportError:
-# # Apparently bsddb3 is needed on OSX
-# from bsddb3.db import *
-#
-#import json
-#import struct
-#
-#class BCDataStream(object):
-# def __init__(self):
-# self.input = None
-# self.read_cursor = 0
-#
-# def clear(self):
-# self.input = None
-# self.read_cursor = 0
-#
-# def write(self, bytes): # Initialize with string of bytes
-# if self.input is None:
-# self.input = bytes
-# else:
-# self.input += bytes
-#
-# def map_file(self, file, start): # Initialize with bytes from file
-# self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
-# self.read_cursor = start
-# def seek_file(self, position):
-# self.read_cursor = position
-# def close_file(self):
-# self.input.close()
-#
-# def read_string(self):
-# # Strings are encoded depending on length:
-# # 0 to 252 : 1-byte-length followed by bytes (if any)
-# # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
-# # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
-# # ... and the Bitcoin client is coded to understand:
-# # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
-# # ... but I don't think it actually handles any strings that big.
-# if self.input is None:
-# raise SerializationError("call write(bytes) before trying to deserialize")
-#
-# try:
-# length = self.read_compact_size()
-# except IndexError:
-# raise SerializationError("attempt to read past end of buffer")
-#
-# return self.read_bytes(length)
-#
-# def write_string(self, string):
-# # Length-encoded as with read-string
-# self.write_compact_size(len(string))
-# self.write(string)
-#
-# def read_bytes(self, length):
-# try:
-# result = self.input[self.read_cursor:self.read_cursor+length]
-# self.read_cursor += length
-# return result
-# except IndexError:
-# raise SerializationError("attempt to read past end of buffer")
-#
-# return ''
-#
-# def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
-# def read_int16(self): return self._read_num('0:
-# # Satoshi Wallet is encrypted!
-# plainkeys = []
-# if not passphrase:
-# raise EncryptionError, 'Satoshi wallet is encrypted but no passphrase supplied'
-#
-# pKey,IV = GetKeyFromPassphraseSatoshi( passphrase, \
-# mkey['salt'], \
-# mkey['iter'], \
-# mkey['mthd'])
-#
-# masterKey = CryptoAES().DecryptCBC( SecureBinaryData(mkey['mkey']), \
-# SecureBinaryData(pKey), \
-# SecureBinaryData(IV) )
-# masterKey.resize(32)
-#
-# checkedCorrectPassphrase = False
-# for pub,ckey in crypt:
-# iv = hash256(pub)[:16]
-# privKey = CryptoAES().DecryptCBC( SecureBinaryData(ckey), \
-# SecureBinaryData(masterKey), \
-# SecureBinaryData(iv))
-# privKey.resize(32)
-# if not checkedCorrectPassphrase:
-# checkedCorrectPassphrase = True
-# if not CryptoECDSA().CheckPubPrivKeyMatch(privKey, SecureBinaryData(pub)):
-# raise EncryptionError, 'Incorrect Passphrase!'
-# plainkeys.append(privKey)
-#
-# outputList = []
-# for key in plainkeys:
-# addr = hash160_to_addrStr(convertKeyDataToAddress(key.toBinStr()))
-# strName = ''
-# if names.has_key(addr):
-# strName = names[addr]
-# outputList.append( [addr, key, (not addr in pool), strName] )
-# return outputList
-#
-#
-#
-#def checkSatoshiEncrypted(wltPath):
-# try:
-# extractSatoshiKeys(wltPath, '')
-# return False
-# except EncryptionError:
-# return True
-
-
-
-
-
-class PyBackgroundThread(threading.Thread):
- """
- Wraps a function in a threading.Thread object which will run
- that function in a separate thread. Calling self.start() will
- return immediately, but will start running that function in
- separate thread. You can check its progress later by using
- self.isRunning() or self.isFinished(). If the function returns
- a value, use self.getOutput(). Use self.getElapsedSeconds()
- to find out how long it took.
- """
-
- def __init__(self, *args, **kwargs):
- threading.Thread.__init__(self)
-
- self.output = None
- self.startedAt = UNINITIALIZED
- self.finishedAt = UNINITIALIZED
-
- if len(args)==0:
- self.func = lambda: ()
- else:
- if not hasattr(args[0], '__call__'):
- raise TypeError, ('PyBkgdThread constructor first arg '
- '(if any) must be a function')
- else:
- self.setThreadFunction(args[0], *args[1:], **kwargs)
-
- def setThreadFunction(self, thefunc, *args, **kwargs):
- def funcPartial():
- return thefunc(*args, **kwargs)
- self.func = funcPartial
-
- def isFinished(self):
- return not (self.finishedAt==UNINITIALIZED)
-
- def isStarted(self):
- return not (self.startedAt==UNINITIALIZED)
-
- def isRunning(self):
- return (self.isStarted() and not self.isFinished())
-
- def getElapsedSeconds(self):
- if not self.isFinished():
- LOGERROR('Thread is not finished yet!')
- return None
- else:
- return self.finishedAt - self.startedAt
-
- def getOutput(self):
- if not self.isFinished():
- if self.isRunning():
- LOGERROR('Cannot get output while thread is running')
- else:
- LOGERROR('Thread was never .start()ed')
- return None
-
- return self.output
-
-
- def start(self):
- # The prefunc is blocking. Probably preparing something
- # that needs to be in place before we start the thread
- self.startedAt = RightNow()
- super(PyBackgroundThread, self).start()
-
- def run(self):
- # This should not be called manually. Only call start()
- self.output = self.func()
- self.finishedAt = RightNow()
-
- def reset(self):
- self.output = None
- self.startedAt = UNINITIALIZED
- self.finishedAt = UNINITIALIZED
-
- def restart(self):
- self.reset()
- self.start()
-
-
-# Define a decorator that allows the function to be called asynchronously
-def AllowAsync(func):
- def wrappedFunc(*args, **kwargs):
-
- if not 'async' in kwargs or not kwargs['async']==True:
- # Run the function normally
- if 'async' in kwargs:
- del kwargs['async']
- return func(*args, **kwargs)
- else:
- # Run the function as a background thread
- del kwargs['async']
- thr = PyBackgroundThread(func, *args, **kwargs)
- thr.start()
- return thr
-
- return wrappedFunc
-
-
-
-
-################################################################################
-# Let's create a thread-wrapper for the blockchain utilities. Enable the
-# ability for multi-threaded blockchain scanning -- have a main thread and
-# a blockchain thread: blockchain can scan, and main thread will check back
-# every now and then to see if it's done
-
-import Queue
-BLOCKCHAINMODE = enum('Offline', \
- 'Uninitialized', \
- 'Full', \
- 'Rescanning', \
- 'LiteScanning', \
- 'FullPrune', \
- 'Lite')
-
-BDMINPUTTYPE = enum('RegisterAddr', \
- 'ZeroConfTxToInsert', \
- 'HeaderRequested', \
- 'TxRequested', \
- 'BlockRequested', \
- 'AddrBookRequested', \
- 'BlockAtHeightRequested', \
- 'HeaderAtHeightRequested', \
- 'ForceRebuild', \
- 'RescanRequested', \
- 'WalletRecoveryScan', \
- 'UpdateWallets', \
- 'ReadBlkUpdate', \
- 'GoOnlineRequested', \
- 'GoOfflineRequested', \
- 'Passthrough', \
- 'Reset', \
- 'Shutdown')
-
-################################################################################
-class BlockDataManagerThread(threading.Thread):
- """
- A note about this class:
-
- It was mainly created to allow for asynchronous blockchain scanning,
- but the act of splitting the BDM into it's own thread meant that ALL
- communication with the BDM requires thread-safe access. So basically,
- I had to wrap EVERYTHING. And then make it flexible.
-
- For this reason, any calls not explicitly related to rescanning will
- block by default, which could be a long time if the BDM is in the
- middle of rescanning. For this reason, you are expected to either
- pass wait=False if you just want to queue the function call and move
- on in the main thread, or check the BDM state first, to make sure
- it's not currently scanning and can expect immediate response.
-
- This makes using the BDM much more complicated. But comes with the
- benefit of all rescanning being able to happen in the background.
- If you want to run it like single-threaded, you can use
- TheBDM.setBlocking(True) and all calls will block. Always (unless
- you pass wait=False explicitly to one of those calls).
-
- Any calls that retrieve data from the BDM should block, even if you
- technically can specify wait=False. This is because the class was
- not designed to maintain organization of output data asynchronously.
- So a call like TheBDM.getTopBlockHeader() will always block, and you
- should check the BDM state if you want to make sure it returns
- immediately. Since there is only one main thread, There is really no
- way for a rescan to be started between the time you check the state
- and the time you call the method (so if you want to access the BDM
- from multiple threads, this class will need some redesign).
-
-
- This serves as a layer between the GUI and the Blockchain utilities.
- If a request is made to mess with the BDM while it is in the
- middle of scanning, it will queue it for when it's done
-
- All private methods (those starting with two underscores, like __method),
- are executed only by the BDM thread. These should never be called
- externally, and are only safe to run when the BDM is ready to execute
- them.
-
- You can use any non-private methods at any time, and if you set wait=True,
- the main thread will block until that operation is complete. If the BDM
- is in the middle of a scan, the main thread could block for minutes until
- the scanning is complete and then it processes your request.
-
- Therefore, use setBlocking(True) to make sure you always wait/block after
- every call, if you are interested in simplicity and don't mind waiting.
-
- Use setBlocking(False) along with wait=False for the appropriate calls
- to queue up your request and continue the main thread immediately. You
- can finish up something else, and then come back and check whether the
- job is finished (usually using TheBDM.getBDMState()=='BlockchainReady')
-
- Any methods not defined explicitly in this class will "passthrough" the
- __getattr__() method, which will then call that exact method name on
- the BDM. All calls block by default. All such calls can also include
- wait=False if you want to queue it and then continue asynchronously.
-
-
- Implementation notes:
-
- Before the multi-threaded BDM, there was wallets, and there was the BDM.
- We always gave the wallets to the BDM and said "please search the block-
- chain for relevant transactions". Now that this is asynchronous, the
- calling thread is going to queue the blockchain scan, and then run off
- and do other things: which may include address/wallet operations that
- would collide with the BDM updating it.
-
- THEREFORE, the BDM now has a single, master wallet. Any address you add
- to any of your wallets, should be added to the master wallet, too. The
- PyBtcWallet class does this for you, but if you are using raw BtcWallets
- (the C++ equivalent), you need to do:
-
- cppWallet.addScrAddress_1_(Hash160ToScrAddr(newAddr))
- TheBDM.registerScrAddr(newAddr, isFresh=?)
-
- This will add the address to the TheBDM.masterCppWallet. Then when you
- queue up the TheBDM to do a rescan (if necessary), it will update only
- its own wallet. Luckily, I designed the BDM so that data for addresses
- in one wallet (the master), can be applied immediately to other/new
- wallets that have the same addresses.
-
- If you say isFresh=False, then the BDM will set isDirty=True. This means
- that a full rescan will have to be performed, and wallet information may
- not be accurate until it is performed. isFresh=True should be used for
- addresses/wallets you just created, and thus there's no reason to rescan,
- because there's no chance they could have any history in the blockchain.
-
- Tying this all together: if you add an address to a PYTHON wallet, you
- just add it through an existing call. If you add it with a C++ wallet,
- you need to explicitly register it with TheBDM, too. Then you need to
- tell the BDM to do a rescan (if isDirty==True), and then call the method
- updateWalletsAfterScan(
- are ready, you can chec
-
- """
- #############################################################################
- def __init__(self, isOffline=False, blocking=False):
- super(BlockDataManagerThread, self).__init__()
-
- if isOffline:
- self.blkMode = BLOCKCHAINMODE.Offline
- self.prefMode = BLOCKCHAINMODE.Offline
- else:
- self.blkMode = BLOCKCHAINMODE.Uninitialized
- self.prefMode = BLOCKCHAINMODE.Full
-
- self.bdm = Cpp.BlockDataManager().getBDM()
-
- # These are for communicating with the master (GUI) thread
- self.inputQueue = Queue.Queue()
- self.outputQueue = Queue.Queue()
-
- # Flags
- self.startBDM = False
- self.doShutdown = False
- self.aboutToRescan = False
- self.errorOut = 0
-
- self.setBlocking(blocking)
-
- self.currentActivity = 'None'
-
- # Lists of wallets that should be checked after blockchain updates
- self.pyWltList = [] # these will be python refs
- self.cppWltList = [] # these will be python refs
-
- # The BlockDataManager is easier to use if you put all your addresses
- # into a C++ BtcWallet object, and let it
- self.masterCppWallet = Cpp.BtcWallet()
- self.bdm.registerWallet(self.masterCppWallet)
-
- self.btcdir = BTC_HOME_DIR
- self.ldbdir = LEVELDB_DIR
- self.lastPctLoad = 0
-
-
-
-
- #############################################################################
- def __getattr__(self, name):
- '''
- Anything that is not explicitly defined in this class should
- passthrough to the C++ BlockDataManager class
-
- This remaps such calls into "passthrough" requests via the input
- queue. This makes sure that the requests are processed only when
- the BDM is ready. Hopefully, this will prevent multi-threaded
- disasters, such as seg faults due to trying to read memory that is
- in the process of being updated.
-
- Specifically, any passthrough call is expected to return output
- unless you add 'waitForReturn=False' to the arg list. i.e. all
- calls that "passthrough" will always block unless you explicitly
- tell it not to.
- '''
-
-
- rndID = int(random.uniform(0,100000000))
- if not hasattr(self.bdm, name):
- LOGERROR('No BDM method: %s', name)
- raise AttributeError
- else:
- def passthruFunc(*args, **kwargs):
- #LOGDEBUG('External thread requesting: %s (%d)', name, rndID)
- waitForReturn = True
- if len(kwargs)>0 and \
- kwargs.has_key('wait') and \
- not kwargs['wait']:
- waitForReturn = False
-
-
- # If this was ultimately called from the BDM thread, don't go
- # through the queue, just do it!
- if len(kwargs)>0 and \
- kwargs.has_key('calledFromBDM') and \
- kwargs['calledFromBDM']:
- return getattr(self.bdm, name)(*args)
-
- self.inputQueue.put([BDMINPUTTYPE.Passthrough, rndID, waitForReturn, name] + list(args))
-
-
- if waitForReturn:
- try:
- out = self.outputQueue.get(True, self.mtWaitSec)
- return out
- except Queue.Empty:
- LOGERROR('BDM was not ready for your request! Waited %d sec.' % self.mtWaitSec)
- LOGERROR(' getattr name: %s', name)
- LOGERROR('BDM currently doing: %s (%d)', self.currentActivity,self.currentID )
- LOGERROR('Waiting for completion: ID= %d', rndID)
- LOGERROR('Direct traceback')
- traceback.print_stack()
- self.errorOut += 1
- LOGEXCEPT('Traceback:')
- return passthruFunc
-
-
-
- #############################################################################
- def waitForOutputIfNecessary(self, expectOutput, rndID=0):
- # The get() command will block until the thread puts something there.
- # We don't always expect output, but we use this method to
- # replace inputQueue.join(). The reason for doing it is so
- # that we can guarantee that BDM thread knows whether we are waiting
- # for output or not, and any additional requests put on the inputQueue
- # won't extend our wait time for this request
- if expectOutput:
- try:
- return self.outputQueue.get(True, self.mtWaitSec)
- except Queue.Empty:
- stkOneUp = traceback.extract_stack()[-2]
- filename,method = stkOneUp[0], stkOneUp[1]
- LOGERROR('Waiting for BDM output that didn\'t come after %ds.' % self.mtWaitSec)
- LOGERROR('BDM state is currently: %s', self.getBDMState())
- LOGERROR('Called from: %s:%d (%d)', os.path.basename(filename), method, rndID)
- LOGERROR('BDM currently doing: %s (%d)', self.currentActivity, self.currentID)
- LOGERROR('Direct traceback')
- traceback.print_stack()
- LOGEXCEPT('Traceback:')
- self.errorOut += 1
- else:
- return None
-
-
- #############################################################################
- def setBlocking(self, doblock=True, newTimeout=MT_WAIT_TIMEOUT_SEC):
- """
- If we want TheBDM to behave as a single-threaded app, we need to disable
- the timeouts so that long operations (such as reading the blockchain) do
- not crash the process.
-
- So setting wait=True is NOT identical to setBlocking(True), since using
- wait=True with blocking=False will break when the timeout has been reached
- """
- if doblock:
- self.alwaysBlock = True
- self.mtWaitSec = None
- else:
- self.alwaysBlock = False
- self.mtWaitSec = newTimeout
-
-
- #############################################################################
- def Reset(self, wait=None):
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.Reset, rndID, expectOutput] )
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
- #############################################################################
- def getBlkMode(self):
- return self.blkMode
-
- #############################################################################
- def getBDMState(self):
- if self.blkMode == BLOCKCHAINMODE.Offline:
- # BDM will not be able to provide any blockchain data, or scan
- return 'Offline'
- elif self.blkMode == BLOCKCHAINMODE.Full and not self.aboutToRescan:
- # The BDM is idle, waiting for things to do
- return 'BlockchainReady'
- elif self.blkMode == BLOCKCHAINMODE.LiteScanning and not self.aboutToRescan:
- # The BDM is doing some processing but it is expected to be done within
- # 0.1s. For instance, readBlkFileUpdate requires processing, but can be
- # performed 100/sec. For the outside calling thread, this is not any
- # different than BlockchainReady.
- return 'BlockchainReady'
- elif self.blkMode == BLOCKCHAINMODE.Rescanning or self.aboutToRescan:
- # BDM is doing a FULL scan of the blockchain, and expected to take
-
- return 'Scanning'
- elif self.blkMode == BLOCKCHAINMODE.Uninitialized and not self.aboutToRescan:
- # BDM wants to be online, but the calling thread never initiated the
- # loadBlockchain() call. Usually setOnlineMode, registerWallets, then
- # load the blockchain.
- return 'Uninitialized'
- elif self.blkMode == BLOCKCHAINMODE.FullPrune:
- # NOT IMPLEMENTED
- return 'FullPrune'
- elif self.blkMode == BLOCKCHAINMODE.Lite:
- # NOT IMPLEMENTED
- return 'Lite'
- else:
- return '' % self.blkMode
-
-
- #############################################################################
- def predictLoadTime(self):
- # Apparently we can't read the C++ state while it's scanning,
- # specifically getLoadProgress* methods. Thus we have to resort
- # to communicating via files... bleh
- bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
- if not os.path.exists(bfile):
- return [-1,-1,-1,-1]
-
- try:
- with open(bfile,'r') as f:
- tmtrx = [line.split() for line in f.readlines() if len(line.strip())>0]
- phases = [float(row[0]) for row in tmtrx]
- currPhase = phases[-1]
- startat = [float(row[1]) for row in tmtrx if float(row[0])==currPhase]
- sofar = [float(row[2]) for row in tmtrx if float(row[0])==currPhase]
- total = [float(row[3]) for row in tmtrx if float(row[0])==currPhase]
- times = [float(row[4]) for row in tmtrx if float(row[0])==currPhase]
-
- startRow = 0 if len(startat)<=10 else -10
- todo = total[0] - startat[0]
- pct0 = sofar[0] / todo
- pct1 = sofar[-1] / todo
- t0,t1 = times[0], times[-1]
- if (not t1>t0) or todo<0:
- return [-1,-1,-1,-1]
- rate = (pct1-pct0) / (t1-t0)
- tleft = (1-pct1)/rate
- totalPct = (startat[-1] + sofar[-1]) / total[-1]
- if not self.lastPctLoad == pct1:
- LOGINFO('Reading blockchain, pct complete: %0.1f', 100*totalPct)
- self.lastPctLoad = totalPct
- return [currPhase,totalPct,rate,tleft]
- except:
- raise
- return [-1,-1,-1,-1]
-
-
-
-
- #############################################################################
- def execCleanShutdown(self, wait=True):
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.Shutdown, rndID, expectOutput])
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
- #############################################################################
- def setSatoshiDir(self, newBtcDir):
- if not os.path.exists(newBtcDir):
- LOGERROR('setSatoshiDir: directory does not exist: %s', newBtcDir)
- return
-
- if not self.blkMode in (BLOCKCHAINMODE.Offline, BLOCKCHAINMODE.Uninitialized):
- LOGERROR('Cannot set blockchain/satoshi path after BDM is started')
- return
-
- self.btcdir = newBtcDir
-
- #############################################################################
- def setLevelDBDir(self, ldbdir):
-
- if not self.blkMode in (BLOCKCHAINMODE.Offline, BLOCKCHAINMODE.Uninitialized):
- LOGERROR('Cannot set blockchain/satoshi path after BDM is started')
- return
-
- if not os.path.exists(ldbdir):
- os.makedirs(ldbdir)
-
- self.ldbdir = ldbdir
-
-
- #############################################################################
- def setOnlineMode(self, goOnline=True, wait=None):
- LOGINFO('Setting online mode: %s (wait=%s)' % (str(goOnline), str(wait)))
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
-
- if goOnline:
- if TheBDM.getBDMState() in ('Offline','Uninitialized'):
- self.inputQueue.put([BDMINPUTTYPE.GoOnlineRequested, rndID, expectOutput])
- else:
- if TheBDM.getBDMState() in ('Scanning','BlockchainReady'):
- self.inputQueue.put([BDMINPUTTYPE.GoOfflineRequested, rndID, expectOutput])
-
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
- #############################################################################
- def isScanning(self):
- return (self.aboutToRescan or self.blkMode==BLOCKCHAINMODE.Rescanning)
-
-
- #############################################################################
- def readBlkFileUpdate(self, wait=True):
- """
- This method can be blocking... it always has been without a problem,
- because the block file updates are always fast. But I have to assume
- that it theoretically *could* take a while. Consider using wait=False
- if you want it to do its thing and not wait for it (this matters, because
- you'll want to call TheBDM.updateWalletsAfterScan() when this is
- finished to make sure that
- """
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.ReadBlkUpdate, rndID, expectOutput])
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
- #############################################################################
- def isInitialized(self):
- return self.blkMode==BLOCKCHAINMODE.Full and self.bdm.isInitialized()
-
-
- #############################################################################
- def isDirty(self):
- return self.bdm.isDirty()
-
-
-
-
- #############################################################################
- def rescanBlockchain(self, scanType='AsNeeded', wait=None):
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- self.aboutToRescan = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.RescanRequested, rndID, expectOutput, scanType])
- LOGINFO('Blockchain rescan requested')
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
- #############################################################################
- def updateWalletsAfterScan(self, wait=True):
- """
- Be careful with this method: it is asking the BDM thread to update
- the wallets in the main thread. If you do this with wait=False, you
- need to avoid any wallet operations in the main thread until it's done.
- However, this is usually very fast as long as you know the BDM is not
- in the middle of a rescan, so you might as well set wait=True.
-
- In fact, I highly recommend you always use wait=True, in order to
- guarantee thread-safety.
-
- NOTE: If there are multiple wallet-threads, this might not work. It
- might require specifying which wallets to update after a scan,
- so that other threads don't collide with the BDM updating its
- wallet when called from this thread.
- """
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.UpdateWallets, rndID, expectOutput])
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
- #############################################################################
- def startWalletRecoveryScan(self, pywlt, wait=None):
- """
- A wallet recovery scan may require multiple, independent rescans. This
- is because we don't know how many addresses to pre-calculate for the
- initial scan. So, we will calculate the first X addresses in the wallet,
- do a scan, and then if any addresses have tx history beyond X/2, calculate
- another X and rescan. This will usually only have to be done once, but
- may need to be repeated for super-active wallets.
- (In the future, I may add functionality to sample the gap between address
- usage, so I can more-intelligently determine when we're at the end...)
- """
-
-
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- self.aboutToRescan = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.WalletRecoveryScan, rndID, expectOutput, pywlt])
- LOGINFO('Wallet recovery scan requested')
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
-
- #############################################################################
- def __checkBDMReadyToServeData(self):
- if self.blkMode==BLOCKCHAINMODE.Rescanning:
- LOGERROR('Requested blockchain data while scanning. Don\'t do this!')
- LOGERROR('Check self.getBlkModeStr()==BLOCKCHAINMODE.Full before')
- LOGERROR('making requests! Skipping request')
- return False
- if self.blkMode==BLOCKCHAINMODE.Offline:
- LOGERROR('Requested blockchain data while BDM is in offline mode.')
- LOGERROR('Please start the BDM using TheBDM.setOnlineMode() before,')
- LOGERROR('and then wait for it to complete, before requesting data.')
- return False
- if not self.bdm.isInitialized():
- LOGERROR('The BDM thread declares the BDM is ready, but the BDM ')
- LOGERROR('itself reports that it is not initialized! What is ')
- LOGERROR('going on...?')
- return False
-
-
- return True
-
- #############################################################################
- def getTxByHash(self, txHash):
- """
- All calls that retrieve blockchain data are blocking calls. You have
- no choice in the matter!
- """
- #if not self.__checkBDMReadyToServeData():
- #return None
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.TxRequested, rndID, True, txHash])
-
- try:
- result = self.outputQueue.get(True, 10)
- if result==None:
- LOGERROR('Requested tx does not exist:\n%s', binary_to_hex(txHash))
- return result
- except Queue.Empty:
- LOGERROR('Waited 10s for tx to be returned. Abort')
- LOGERROR('ID: getTxByHash (%d)', rndID)
- return None
- #LOGERROR('Going to block until we get something...')
- #return self.outputQueue.get(True)
-
- return None
-
-
- ############################################################################
- def getHeaderByHash(self, headHash):
- """
- All calls that retrieve blockchain data are blocking calls. You have
- no choice in the matter!
- """
- #if not self.__checkBDMReadyToServeData():
- #return None
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.HeaderRequested, rndID, True, headHash])
-
- try:
- result = self.outputQueue.get(True, 10)
- if result==None:
- LOGERROR('Requested header does not exist:\n%s', \
- binary_to_hex(headHash))
- return result
- except Queue.Empty:
- LOGERROR('Waited 10s for header to be returned. Abort')
- LOGERROR('ID: getTxByHash (%d)', rndID)
- #LOGERROR('Going to block until we get something...')
- #return self.outputQueue.get(True)
-
- return None
-
-
- #############################################################################
- def getBlockByHash(self,headHash):
- """
- All calls that retrieve blockchain data are blocking calls. You have
- no choice in the matter!
-
- This retrives the full block, not just the header, encoded the same
- way as it is in the blkXXXX.dat files (including magic bytes and
- block 4-byte block size)
- """
- #if not self.__checkBDMReadyToServeData():
- #return None
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.BlockRequested, rndID, True, headHash])
-
- try:
- result = self.outputQueue.get(True, 10)
- if result==None:
- LOGERROR('Requested block does not exist:\n%s', \
- binary_to_hex(headHash))
- return result
- except Queue.Empty:
- LOGERROR('Waited 10s for block to be returned. Abort')
- LOGERROR('ID: getTxByHash (%d)', rndID)
- #LOGERROR('Going to block until we get something...')
- #return self.outputQueue.get(True)
-
- return None
-
-
- #############################################################################
- def getAddressBook(self, wlt):
- """
- Address books are constructed from Blockchain data, which means this
- must be a blocking method.
- """
- rndID = int(random.uniform(0,100000000))
- if isinstance(wlt, PyBtcWallet):
- self.inputQueue.put([BDMINPUTTYPE.AddrBookRequested, rndID, True, wlt.cppWallet])
- elif isinstance(wlt, Cpp.BtcWallet):
- self.inputQueue.put([BDMINPUTTYPE.AddrBookRequested, rndID, True, wlt])
-
- try:
- result = self.outputQueue.get(True, self.mtWaitSec)
- return result
- except Queue.Empty:
- LOGERROR('Waited %ds for addrbook to be returned. Abort' % self.mtWaitSec)
- LOGERROR('ID: getTxByHash (%d)', rndID)
- #LOGERROR('Going to block until we get something...')
- #return self.outputQueue.get(True)
-
- return None
-
- #############################################################################
- def addNewZeroConfTx(self, rawTx, timeRecv, writeToFile, wait=None):
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.ZeroConfTxToInsert, rndID, expectOutput, rawTx, timeRecv])
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
- #############################################################################
- def registerScrAddr(self, scrAddr, isFresh=False, wait=None):
- """
- This is for a generic address: treat it as imported (requires rescan)
- unless specifically specified otherwise
- """
- if isFresh:
- self.registerNewScrAddr(scrAddr, wait=wait)
- else:
- self.registerImportedScrAddr(scrAddr, wait=wait)
-
-
- #############################################################################
- def registerNewScrAddr(self, scrAddr, wait=None):
- """
- Variable isFresh==True means the address was just [freshly] created,
- and we need to watch for transactions with it, but we don't need
- to rescan any blocks
- """
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.RegisterAddr, rndID, expectOutput, scrAddr, True])
-
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
-
- #############################################################################
- def registerImportedScrAddr(self, scrAddr, \
- firstTime=UINT32_MAX, \
- firstBlk=UINT32_MAX, \
- lastTime=0, \
- lastBlk=0, wait=None):
- """
- TODO: Need to clean up the first/last blk/time variables. Rather,
- I need to make sure they are maintained and applied intelligently
- and consistently
- """
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- rndID = int(random.uniform(0,100000000))
- self.inputQueue.put([BDMINPUTTYPE.RegisterAddr, rndID, expectOutput, scrAddr, \
- [firstTime, firstBlk, lastTime, lastBlk]])
-
- return self.waitForOutputIfNecessary(expectOutput, rndID)
-
-
- #############################################################################
- def registerWallet(self, wlt, isFresh=False, wait=None):
- """
- Will register a C++ wallet or Python wallet
- """
-
- expectOutput = False
- if not wait==False and (self.alwaysBlock or wait==True):
- expectOutput = True
-
- if isinstance(wlt, PyBtcWallet):
- scrAddrs = [Hash160ToScrAddr(a.getAddr160()) for a in wlt.getAddrList()]
-
- if isFresh:
- for scrad in scrAddrs:
- self.registerNewScrAddr(scrad, wait=wait)
- else:
- for scrad in scrAddrs:
- self.registerImportedScrAddr(scrad, wait=wait)
-
- if not wlt in self.pyWltList:
- self.pyWltList.append(wlt)
-
- elif isinstance(wlt, Cpp.BtcWallet):
- naddr = wlt.getNumScrAddr()
-
- for a in range(naddr):
- self.registerScrAddr(wlt.getScrAddrObjByIndex(a).getScrAddr(), isFresh, wait=wait)
-
- if not wlt in self.cppWltList:
- self.cppWltList.append(wlt)
- else:
- LOGERROR('Unrecognized object passed to registerWallet function')
-
-
-
-
-
- #############################################################################
- # These bdm_direct methods feel like a hack. They probably are. I need
- # find an elegant way to get the code normally run outside the BDM thread,
- # to be able to run inside the BDM thread without using the BDM queue (since
- # the queue is specifically FOR non-BDM-thread calls). For now, the best
- # I can do is create non-private versions of these methods that access BDM
- # methods directly, but should not be used under any circumstances, unless
- # we know for sure that the BDM ultimately called this method.
- def registerScrAddr_bdm_direct(self, scrAddr, timeInfo):
- """
- Something went awry calling __registerScrAddrNow from the PyBtcWallet
- code (apparently I don't understand __methods). Use this method to
- externally bypass the BDM thread queue and register the address
- immediately.
-
- THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
- This method can be called from a non BDM class, but should only do so if
- that class method was called by the BDM (thus, no conflicts)
- """
- self.__registerScrAddrNow(scrAddr, timeInfo)
-
-
- #############################################################################
- def scanBlockchainForTx_bdm_direct(self, cppWlt, startBlk=0, endBlk=UINT32_MAX):
- """
- THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
- This method can be called from a non BDM class, but should only do so if
- that class method was called by the BDM (thus, no conflicts)
- """
- self.bdm.scanRegisteredTxForWallet(cppWlt, startBlk, endBlk)
-
- #############################################################################
- def scanRegisteredTxForWallet_bdm_direct(self, cppWlt, startBlk=0, endBlk=UINT32_MAX):
- """
- THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
- This method can be called from a non BDM class, but should only do so if
- that class method was called by the BDM (thus, no conflicts)
- """
- self.bdm.scanRegisteredTxForWallet(cppWlt, startBlk, endBlk)
-
- #############################################################################
- def getTopBlockHeight_bdm_direct(self):
- """
- THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
- This method can be called from a non BDM class, but should only do so if
- that class method was called by the BDM (thus, no conflicts)
- """
- return self.bdm.getTopBlockHeight()
-
-
-
- #############################################################################
- def getLoadProgress(self):
- """
- This method does not actually work! The load progress in bytes is not
- updated properly while the BDM thread is scanning. It might have to
- emit this information explicitly in order to be useful.
- """
- return (self.bdm.getLoadProgressBytes(), self.bdm.getTotalBlockchainBytes())
-
-
- #############################################################################
- def __registerScrAddrNow(self, scrAddr, timeInfo):
- """
- Do the registration right now. This should not be called directly
- outside of this class. This is only called by the BDM thread when
- any previous scans have been completed
- """
-
- if isinstance(timeInfo, bool):
- isFresh = timeInfo
- if isFresh:
- # We claimed to have just created this ScrAddr...(so no rescan needed)
- self.masterCppWallet.addNewScrAddress_1_(scrAddr)
- else:
- self.masterCppWallet.addScrAddress_1_(scrAddr)
- else:
- if isinstance(timeInfo, (list,tuple)) and len(timeInfo)==4:
- self.masterCppWallet.addScrAddress_5_(scrAddr, *timeInfo)
- else:
- LOGWARN('Unrecognized time information in register method.')
- LOGWARN(' Data: %s', str(timeInfo))
- LOGWARN('Assuming imported key requires full rescan...')
- self.masterCppWallet.addScrAddress_1_(scrAddr)
-
-
-
- #############################################################################
- def __startLoadBlockchain(self):
- """
- This should only be called by the threaded BDM, and thus there should
- never be a conflict.
- """
-
- LOGINFO('Called __startLoadBlockchain()')
-
- TimerStart('__startLoadBlockchain')
-
- if self.blkMode == BLOCKCHAINMODE.Rescanning:
- LOGERROR('Blockchain is already scanning. Was this called already?')
- return
- elif self.blkMode == BLOCKCHAINMODE.Full:
- LOGERROR('Blockchain has already been loaded -- maybe we meant')
- LOGERROR('to call startRescanBlockchain()...?')
- return
- elif not self.blkMode == BLOCKCHAINMODE.Uninitialized:
- LOGERROR('BDM should be in "Uninitialized" mode before starting ')
- LOGERROR('the initial scan. If BDM is in offline mode, you should ')
- LOGERROR('switch it to online-mode, first, then request the scan.')
- LOGERROR('Continuing with the scan, anyway.')
-
-
- # Remove "blkfiles.txt" to make sure we get accurate TGO
- bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
- if os.path.exists(bfile):
- os.remove(bfile)
-
- # Check for the existence of the Bitcoin-Qt directory
- if not os.path.exists(self.btcdir):
- raise FileExistsError, ('Directory does not exist: %s' % self.btcdir)
-
- blkdir = os.path.join(self.btcdir, 'blocks')
- blk1st = os.path.join(blkdir, 'blk00000.dat')
-
- # ... and its blk000X.dat files
- if not os.path.exists(blk1st):
- LOGERROR('Blockchain data not available: %s', blk1st)
- self.prefMode = BLOCKCHAINMODE.Offline
- raise FileExistsError, ('Blockchain data not available: %s' % self.blk1st)
-
- # We have the data, we're ready to go
- self.blkMode = BLOCKCHAINMODE.Rescanning
- self.aboutToRescan = False
-
- self.bdm.SetDatabaseModes(ARMORY_DB_BARE, DB_PRUNE_NONE);
- self.bdm.SetHomeDirLocation(ARMORY_HOME_DIR)
- self.bdm.SetBlkFileLocation(str(blkdir))
- self.bdm.SetLevelDBLocation(self.ldbdir)
- self.bdm.SetBtcNetworkParams( GENESIS_BLOCK_HASH, \
- GENESIS_TX_HASH, \
- MAGIC_BYTES)
-
- # The master wallet contains all addresses of all wallets registered
- self.bdm.registerWallet(self.masterCppWallet)
-
- # Now we actually startup the BDM and run with it
- if CLI_OPTIONS.rebuild:
- self.bdm.doInitialSyncOnLoad_Rebuild()
- elif CLI_OPTIONS.rescan:
- self.bdm.doInitialSyncOnLoad_Rescan()
- else:
- self.bdm.doInitialSyncOnLoad()
-
- # The above op populates the BDM with all relevent tx, but those tx
- # still need to be scanned to collect the wallet ledger and UTXO sets
- self.bdm.scanBlockchainForTx(self.masterCppWallet)
-
- TimerStop('__startLoadBlockchain')
-
-
- #############################################################################
- def __startRescanBlockchain(self, scanType='AsNeeded'):
- """
- This should only be called by the threaded BDM, and thus there should
- never be a conflict.
-
- If we don't force a full scan, we let TheBDM figure out how much of the
- chain needs to be rescanned. Which may not be very much. We may
- force a full scan if we think there's an issue with balances.
- """
- if self.blkMode==BLOCKCHAINMODE.Offline:
- LOGERROR('Blockchain is in offline mode. How can we rescan?')
- elif self.blkMode==BLOCKCHAINMODE.Uninitialized:
- LOGERROR('Blockchain was never loaded. Why did we request rescan?')
-
- # Remove "blkfiles.txt" to make sure we get accurate TGO
- bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
- if os.path.exists(bfile):
- os.remove(bfile)
-
- if not self.isDirty():
- LOGWARN('It does not look like we need a rescan... doing it anyway')
-
- if scanType=='AsNeeded':
- if self.bdm.numBlocksToRescan(self.masterCppWallet) < 144:
- LOGINFO('Rescan requested, but <1 day\'s worth of block to rescan')
- self.blkMode = BLOCKCHAINMODE.LiteScanning
- else:
- LOGINFO('Rescan requested, and very large scan is necessary')
- self.blkMode = BLOCKCHAINMODE.Rescanning
-
-
- self.aboutToRescan = False
-
- if scanType=='AsNeeded':
- self.bdm.doSyncIfNeeded()
- elif scanType=='ForceRescan':
- LOGINFO('Forcing full rescan of blockchain')
- self.bdm.doFullRescanRegardlessOfSync()
- self.blkMode = BLOCKCHAINMODE.Rescanning
- elif scanType=='ForceRebuild':
- LOGINFO('Forcing full rebuild of blockchain database')
- self.bdm.doRebuildDatabases()
- self.blkMode = BLOCKCHAINMODE.Rescanning
-
- self.bdm.scanBlockchainForTx(self.masterCppWallet)
-
-
- #############################################################################
- def __startRecoveryRescan(self, pywlt):
- """
- This should only be called by the threaded BDM, and thus there should
- never be a conflict.
-
- In order to work cleanly with the threaded BDM, the search code
- needed to be integrated directly here, instead of being called
- from the PyBtcWallet method. Because that method is normally called
- from outside the BDM thread, but this method is only called from
- _inside_ the BDM thread. Those calls use the BDM stack which will
- deadlock waiting for the itself before it can move on...
-
- Unfortunately, because of this, we have to break a python-class
- privacy rules: we are accessing the PyBtcWallet object as if this
- were PyBtcWallet code (accessing properties directly).
- """
- if not isinstance(pywlt, PyBtcWallet):
- LOGERROR('Only python wallets can be passed for recovery scans')
- return
-
- if self.blkMode==BLOCKCHAINMODE.Offline:
- LOGERROR('Blockchain is in offline mode. How can we rescan?')
- elif self.blkMode==BLOCKCHAINMODE.Uninitialized:
- LOGERROR('Blockchain was never loaded. Why did we request rescan?')
-
-
- self.blkMode = BLOCKCHAINMODE.Rescanning
- self.aboutToRescan = False
-
- #####
-
- # Whenever calling PyBtcWallet methods from BDM, set flag
- prevCalledFromBDM = pywlt.calledFromBDM
- pywlt.calledFromBDM = True
-
- # Do the scan...
- TimerStart('WalletRecoveryScan')
- pywlt.freshImportFindHighestIndex()
- TimerStop('WalletRecoveryScan')
-
- # Unset flag when done
- pywlt.calledFromBDM = prevCalledFromBDM
-
- #####
- self.bdm.scanRegisteredTxForWallet(self.masterCppWallet)
-
-
-
- #############################################################################
- def __readBlockfileUpdates(self):
- '''
- This method can be blocking... it always has been without a problem,
- because the block file updates are always fast. But I have to assume
- that it theoretically *could* take a while, and the caller might care.
- '''
- if self.blkMode == BLOCKCHAINMODE.Offline:
- LOGERROR('Can\'t update blockchain in %s mode!', self.getBDMState())
- return
-
- self.blkMode = BLOCKCHAINMODE.LiteScanning
- nblk = self.bdm.readBlkFileUpdate()
- return nblk
-
-
- #############################################################################
- def __updateWalletsAfterScan(self):
- """
- This will actually do a scan regardless of whether it is currently
- "after scan", but it will usually only be requested right after a
- full rescan
- """
-
- numToRescan = 0
- for pyWlt in self.pyWltList:
- thisNum = self.bdm.numBlocksToRescan(pyWlt.cppWallet)
- numToRescan = max(numToRescan, thisNum)
-
- for cppWlt in self.cppWltList:
- thisNum = self.bdm.numBlocksToRescan(cppWlt)
- numToRescan = max(numToRescan, thisNum)
-
- if numToRescan<144:
- self.blkMode = BLOCKCHAINMODE.LiteScanning
- else:
- self.blkMode = BLOCKCHAINMODE.Rescanning
-
-
- for pyWlt in self.pyWltList:
- pyWlt.syncWithBlockchain()
-
- for cppWlt in self.cppWltList:
- # The pre-leveldb version of Armory specifically required to call
- #
- # scanRegisteredTxForWallet (scan already-collected reg tx)
- #
- # instead of
- #
- # scanBlockchainForTx (search for reg tx then scan)
- #
- # Because the second one will induce a full rescan to find all new
- # registeredTx, if we recently imported an addr or wallet. If we
- # imported but decided not to rescan yet, we wan tthe first one,
- # which only scans the registered tx that are already collected
- # (including new blocks, but not previous blocks).
- #
- # However, with the leveldb stuff only supporting super-node, there
- # is no rescanning, thus it's safe to always call scanBlockchainForTx,
- # which grabs everything from the database almost instantaneously.
- # However we may want to re-examine this after we implement new
- # database modes of operation
- #self.bdm.scanRegisteredTxForWallet(cppWlt)
- self.bdm.scanBlockchainForTx(cppWlt)
-
-
-
-
- #############################################################################
- def __shutdown(self):
- if not self.blkMode == BLOCKCHAINMODE.Rescanning:
- self.bdm.shutdownSaveScrAddrHistories()
-
- self.__reset()
- self.blkMode = BLOCKCHAINMODE.Offline
- self.doShutdown = True
-
- #############################################################################
- def __fullRebuild(self):
- self.bdm.destroyAndResetDatabases()
- self.__reset()
- self.__startLoadBlockchain()
-
- #############################################################################
- def __reset(self):
- LOGERROR('Resetting BDM and all wallets')
- self.bdm.Reset()
-
- if self.blkMode in (BLOCKCHAINMODE.Full, BLOCKCHAINMODE.Rescanning):
- # Uninitialized means we want to be online, but haven't loaded yet
- self.blkMode = BLOCKCHAINMODE.Uninitialized
- elif not self.blkMode==BLOCKCHAINMODE.Offline:
- return
-
- self.bdm.resetRegisteredWallets()
-
- # Flags
- self.startBDM = False
- #self.btcdir = BTC_HOME_DIR
-
- # Lists of wallets that should be checked after blockchain updates
- self.pyWltList = [] # these will be python refs
- self.cppWltList = [] # these will be C++ refs
-
-
- # The BlockDataManager is easier to use if you put all your addresses
- # into a C++ BtcWallet object, and let it
- self.masterCppWallet = Cpp.BtcWallet()
- self.bdm.registerWallet(self.masterCppWallet)
-
-
- #############################################################################
- def __getFullBlock(self, headerHash):
- headerObj = self.bdm.getHeaderByHash(headerHash)
- if not headerObj:
- return None
-
- rawTxList = []
- txList = headerObj.getTxRefPtrList()
- for txref in txList:
- tx = txref.getTxCopy()
- rawTxList.append(tx.serialize())
-
- numTxVarInt = len(rawTxList)
- blockBytes = 80 + len(numTxVarInt) + sum([len(tx) for tx in rawTxList])
-
- rawBlock = MAGIC_BYTES
- rawBlock += int_to_hex(blockBytes, endOut=LITTLEENDIAN, widthBytes=4)
- rawBlock += headerObj.serialize()
- rawBlock += packVarInt(numTx)
- rawBlock += ''.join(rawTxList)
- return rawBlock
-
-
- #############################################################################
- def getBDMInputName(self, i):
- for name in dir(BDMINPUTTYPE):
- if getattr(BDMINPUTTYPE, name)==i:
- return name
-
- #############################################################################
- def run(self):
- """
- This thread runs in an infinite loop, waiting for things to show up
- on the self.inputQueue, and then processing those entries. If there
- are no requests to the BDM from the main thread, this thread will just
- sit idle (in a CPU-friendly fashion) until something does.
- """
-
- while not self.doShutdown:
- # If there were any errors, we will have that many extra output
- # entries on the outputQueue. We clear them off so that this
- # thread can be re-sync'd with the main thread
- try:
- while self.errorOut>0:
- self.outputQueue.get_nowait()
- self.errorOut -= 1
- except Queue.Empty:
- LOGERROR('ErrorOut var over-represented number of errors!')
- self.errorOut = 0
-
-
- # Now start the main
- try:
- try:
- inputTuple = self.inputQueue.get_nowait()
- # If we don't error out, we have stuff to process right now
- except Queue.Empty:
- # We only switch to offline/full/uninitialzed when the queue
- # is empty. After that, then we block in a CPU-friendly way
- # until data shows up on the Queue
- if self.prefMode==BLOCKCHAINMODE.Full:
- if self.bdm.isInitialized():
- self.blkMode = BLOCKCHAINMODE.Full
- else:
- self.blkMode = BLOCKCHAINMODE.Uninitialized
- else:
- self.blkMode = BLOCKCHAINMODE.Offline
-
- self.currentActivity = 'None'
-
- # Block until something shows up.
- inputTuple = self.inputQueue.get()
- except:
- LOGERROR('Unknown error in BDM thread')
-
-
-
- # The first list element is always the BDMINPUTTYPE (command)
- # The second argument is whether the caller will be waiting
- # for the output: which means even if it's None, we need to
- # put something on the output queue.
- cmd = inputTuple[0]
- rndID = inputTuple[1]
- expectOutput = inputTuple[2]
- output = None
-
- # Some variables that can be queried externally to figure out
- # what the BDM is currently doing
- self.currentActivity = self.getBDMInputName(inputTuple[0])
- self.currentID = rndID
-
- if CLI_OPTIONS.mtdebug:
- #LOGDEBUG('BDM Start Exec: %s (%d): %s', self.getBDMInputName(inputTuple[0]), rndID, str(inputTuple))
- tstart = RightNow()
-
-
- if cmd == BDMINPUTTYPE.RegisterAddr:
- scrAddr,timeInfo = inputTuple[3:]
- self.__registerScrAddrNow(scrAddr, timeInfo)
-
- elif cmd == BDMINPUTTYPE.ZeroConfTxToInsert:
- rawTx = inputTuple[3]
- timeIn = inputTuple[4]
- if isinstance(rawTx, PyTx):
- rawTx = rawTx.serialize()
- self.bdm.addNewZeroConfTx(rawTx, timeIn, True)
-
- elif cmd == BDMINPUTTYPE.HeaderRequested:
- headHash = inputTuple[3]
- rawHeader = self.bdm.getHeaderByHash(headHash)
- if rawHeader:
- output = rawHeader
- else:
- output = None
-
- elif cmd == BDMINPUTTYPE.TxRequested:
- txHash = inputTuple[3]
- rawTx = self.bdm.getTxByHash(txHash)
- if rawTx:
- output = rawTx
- else:
- output = None
-
- elif cmd == BDMINPUTTYPE.BlockRequested:
- headHash = inputTuple[3]
- rawBlock = self.__getFullBlock(headHash)
- if rawBlock:
- output = rawBlock
- else:
- output = None
- LOGERROR('Requested header does not exist:\n%s', \
- binary_to_hex(headHash))
-
- elif cmd == BDMINPUTTYPE.HeaderAtHeightRequested:
- height = inputTuple[3]
- rawHeader = self.bdm.getHeaderByHeight(height)
- if rawHeader:
- output = rawHeader
- else:
- output = None
- LOGERROR('Requested header does not exist:\nHeight=%s', height)
-
- elif cmd == BDMINPUTTYPE.BlockAtHeightRequested:
- height = inputTuple[3]
- rawBlock = self.__getFullBlock(height)
- if rawBlock:
- output = rawBlock
- else:
- output = None
- LOGERROR('Requested header does not exist:\nHeight=%s', height)
-
- elif cmd == BDMINPUTTYPE.AddrBookRequested:
- cppWlt = inputTuple[3]
- TimerStart('createAddressBook')
- output = cppWlt.createAddressBook()
- TimerStop('createAddressBook')
-
- elif cmd == BDMINPUTTYPE.UpdateWallets:
- TimerStart('updateWltsAfterScan')
- self.__updateWalletsAfterScan()
- TimerStop('updateWltsAfterScan')
-
- elif cmd == BDMINPUTTYPE.RescanRequested:
- TimerStart('rescanBlockchain')
- scanType = inputTuple[3]
- if not scanType in ('AsNeeded', 'ForceRescan', 'ForceRebuild'):
- LOGERROR('Invalid scan type for rescanning: ' + scanType)
- scanType = 'AsNeeded'
- self.__startRescanBlockchain(scanType)
- TimerStop('rescanBlockchain')
-
- elif cmd == BDMINPUTTYPE.WalletRecoveryScan:
- LOGINFO('Wallet Recovery Scan Requested')
- pywlt = inputTuple[3]
- TimerStart('recoveryRescan')
- self.__startRecoveryRescan(pywlt)
- TimerStop('recoveryRescan')
-
-
- elif cmd == BDMINPUTTYPE.ReadBlkUpdate:
-
- TimerStart('readBlkFileUpdate')
- output = self.__readBlockfileUpdates()
- TimerStop('readBlkFileUpdate')
-
-
- elif cmd == BDMINPUTTYPE.Passthrough:
- # If the caller is waiting, then it is notified by output
- funcName = inputTuple[3]
- funcArgs = inputTuple[4:]
- output = getattr(self.bdm, funcName)(*funcArgs)
-
- elif cmd == BDMINPUTTYPE.Shutdown:
- LOGINFO('Shutdown Requested')
- self.__shutdown()
-
- elif cmd == BDMINPUTTYPE.ForceRebuild:
- LOGINFO('Rebuild databases requested')
- self.__fullRebuild()
-
- elif cmd == BDMINPUTTYPE.Reset:
- LOGINFO('Reset Requested')
- self.__reset()
-
- elif cmd == BDMINPUTTYPE.GoOnlineRequested:
- LOGINFO('Go online requested')
- # This only sets the blkMode to what will later be
- # recognized as online-requested, or offline
- self.prefMode = BLOCKCHAINMODE.Full
- if self.bdm.isInitialized():
- # The BDM was started and stopped at one point, without
- # being reset. It can safely pick up from where it
- # left off
- self.__readBlockfileUpdates()
- else:
- self.blkMode = BLOCKCHAINMODE.Uninitialized
- self.__startLoadBlockchain()
-
- elif cmd == BDMINPUTTYPE.GoOfflineRequested:
- LOGINFO('Go offline requested')
- self.prefMode = BLOCKCHAINMODE.Offline
-
- self.inputQueue.task_done()
- if expectOutput:
- self.outputQueue.put(output)
-
- except Queue.Empty:
- continue
- except:
- inputName = self.getBDMInputName(inputTuple[0])
- LOGERROR('Error processing BDM input')
- LOGERROR('Received inputTuple: ' + inputName + ' ' + str(inputTuple))
- LOGERROR('Error processing ID (%d)', rndID)
- LOGEXCEPT('ERROR:')
- if expectOutput:
- self.outputQueue.put('BDM_REQUEST_ERROR')
- self.inputQueue.task_done()
- continue
-
- LOGINFO('BDM is shutdown.')
-
-
-
-
-
-################################################################################
-# Make TheBDM reference the asyncrhonous BlockDataManager wrapper if we are
-# running
-
-if CLI_OPTIONS.offline:
- LOGINFO('Armory loaded in offline-mode. Will not attempt to load ')
- LOGINFO('blockchain without explicit command to do so.')
- TheBDM = BlockDataManagerThread(isOffline=True, blocking=False)
- TheBDM.start()
-
- # Also create the might-be-needed SatoshiDaemonManager
- TheSDM = SatoshiDaemonManager()
-
-else:
- # NOTE: "TheBDM" is sometimes used in the C++ code to reference the
- # singleton BlockDataManager_LevelDB class object. Here,
- # "TheBDM" refers to a python BlockDataManagerThead class
- # object that wraps the C++ version. It implements some of
- # it's own methods, and then passes through anything it
- # doesn't recognize to the C++ object.
- LOGINFO('Using the asynchronous/multi-threaded BlockDataManager.')
- LOGINFO('Blockchain operations will happen in the background. ')
- LOGINFO('Devs: check TheBDM.getBDMState() before asking for data.')
- LOGINFO('Registering addresses during rescans will queue them for ')
- LOGINFO('inclusion after the current scan is completed.')
- TheBDM = BlockDataManagerThread(isOffline=False, blocking=False)
- TheBDM.setDaemon(True)
- TheBDM.start()
-
- #if CLI_OPTIONS.doDebug or CLI_OPTIONS.netlog or CLI_OPTIONS.mtdebug:
- cppLogFile = os.path.join(ARMORY_HOME_DIR, 'armorycpplog.txt')
- TheBDM.StartCppLogging(cppLogFile, 3)
- TheBDM.EnableCppLogStdOut()
-
- # 32-bit linux has an issue with max open files. Rather than modifying
- # the system, we can tell LevelDB to take it easy with max files to open
- if OS_LINUX and not SystemSpecs.IsX64:
- LOGINFO('Lowering max-open-files parameter in LevelDB for 32-bit linux')
- TheBDM.setMaxOpenFiles(75)
-
- # Override the above if they explicitly specify it as CLI arg
- if CLI_OPTIONS.maxOpenFiles > 0:
- LOGINFO('Overriding max files via command-line arg')
- TheBDM.setMaxOpenFiles( CLI_OPTIONS.maxOpenFiles )
-
- #LOGINFO('LevelDB max-open-files is %d', TheBDM.getMaxOpenFiles())
-
- # Also load the might-be-needed SatoshiDaemonManager
- TheSDM = SatoshiDaemonManager()
-
-
-
-
-
-
-
-
-################################################################################
-#
-# Keep track of lots of different timers:
-#
-# Key: timerName
-# Value: [cumulTime, numStart, lastStart, isRunning]
-#
-TimerMap = {}
-
-def TimerStart(timerName):
- if not TimerMap.has_key(timerName):
- TimerMap[timerName] = [0, 0, 0, False]
-
- timerEntry = TimerMap[timerName]
- timerEntry[1] += 1
- timerEntry[2] = RightNow()
- timerEntry[3] = True
-
-def TimerStop(timerName):
- if not TimerMap.has_key(timerName):
- LOGWARN('Requested stop timer that does not exist! (%s)' % timerName)
- return
-
- if not TimerMap[timerName][3]:
- LOGWARN('Requested stop timer that is not running! (%s)' % timerName)
- return
-
- timerEntry = TimerMap[timerName]
- timerEntry[0] += RightNow() - timerEntry[2]
- timerEntry[2] = 0
- timerEntry[3] = False
-
-
-
-def TimerReset(timerName):
- if not TimerMap.has_key(timerName):
- LOGERROR('Requested reset timer that does not exist! (%s)' % timerName)
-
- # Even if it didn't exist, it will be created now
- TimerMap[timerName] = [0, 0, 0, False]
-
-
-def ReadTimer(timerName):
- if not TimerMap.has_key(timerName):
- LOGERROR('Requested read timer that does not exist! (%s)' % timerName)
- return
-
- timerEntry = TimerMap[timerName]
- return timerEntry[0] + (RightNow() - timerEntry[2])
-
-
-def PrintTimings():
- print 'Timings: '.ljust(30),
- print 'nCall'.rjust(13),
- print 'cumulTime'.rjust(13),
- print 'avgTime'.rjust(13)
- print '-'*70
- for tname,quad in TimerMap.iteritems():
- print ('%s' % tname).ljust(30),
- print ('%d' % quad[1]).rjust(13),
- print ('%0.6f' % quad[0]).rjust(13),
- avg = quad[0]/quad[1]
- print ('%0.6f' % avg).rjust(13)
- print '-'*70
-
-
-def SaveTimingsCSV(fname):
- f = open(fname, 'w')
- f.write( 'TimerName,')
- f.write( 'nCall,')
- f.write( 'cumulTime,')
- f.write( 'avgTime\n\n')
- for tname,quad in TimerMap.iteritems():
- f.write('%s,' % tname)
- f.write('%d,' % quad[1])
- f.write('%0.6f,' % quad[0])
- avg = quad[0]/quad[1]
- f.write('%0.6f\n' % avg)
- f.write('\n\nNote: timings may be incorrect if errors '
- 'were triggered in the timed functions')
- print 'Saved timings to file: %s' % fname
-
-
-
-
-
-
-################################################################################
-# I ORIGINALLY UPDATED THE TIMER TO USE collections MODULE, but it turns out
-# that module is not available on many python versions. So I'm reverting to
-# the older version of the timers ... will update to the version below...
-# at some point...
-#
-# Keep track of lots of different timers:
-#
-# Key: timerName
-# Value: [cumulTime, numStart, lastStart, isRunning]
-#
-
-"""
-import collections
-TimerMap = collections.OrderedDict()
-# Wanted to used namedtuple, but that would be immutable
-class TimerObj(object):
- def __init__(self):
- self.cumulTime = 0
- self.callCount = 0
- self.lastStart = 0
- self.isRunning = False
-
-
-################################################################################
-def TimerStart(timerName, nCall=1):
- if not TimerMap.has_key(timerName):
- TimerMap[timerName] = TimerObj()
-
- timerEntry = TimerMap[timerName]
- timerEntry.callCount += nCall
- timerEntry.lastStart = RightNow()
- timerEntry.isRunning = True
-
-################################################################################
-def TimerStop(timerName):
- if not TimerMap.has_key(timerName):
- LOGWARN('Requested stop timer that does not exist! (%s)' % timerName)
- return
-
- if not TimerMap[timerName].isRunning:
- LOGWARN('Requested stop timer that is not running! (%s)' % timerName)
- return
-
- timerEntry = TimerMap[timerName]
- timerEntry.cumulTime += RightNow() - timerEntry.lastStart
- timerEntry.lastStart = 0
- timerEntry.isRunning = False
-
-
-################################################################################
-def TimerReset(timerName):
- if not TimerMap.has_key(timerName):
- LOGERROR('Requested reset timer that does not exist! (%s)' % timerName)
-
- # Even if it didn't exist, it will be created now
- TimerMap[timerName] = TimerObj(0,0,0,False)
-
-
-################################################################################
-def ReadTimer(timerName):
- if not TimerMap.has_key(timerName):
- LOGERROR('Requested read timer that does not exist! (%s)' % timerName)
- return
-
- timerEntry = TimerMap[timerName]
- return timerEntry.cumulTime + (RightNow() - timerEntry.lastStart)
-
-
-def PrintTimings():
- print 'Timings: '.ljust(22),
- print 'nCall'.rjust(8),
- print 'cumulTime'.rjust(12),
- print 'avgTime'.rjust(12),
- print 'ops/sec'.rjust(12)
- print '-'*80
- for tname,tobj in TimerMap.iteritems():
- print ('%s' % tname).ljust(22),
- print ('%d' % tobj.callCount).rjust(8),
- print ('%0.6f' % tobj.cumulTime).rjust(12),
- avg = tobj.cumulTime/tobj.callCount
- ops = tobj.callCount/tobj.cumulTime
- print ('%0.6f' % avg).rjust(12),
- print ('%0.2f' % ops).rjust(12)
- print '-'*80
-
-
-##
-def SaveTimingsCSV(fname):
- f = open(fname, 'w')
- f.write( 'TimerName,')
- f.write( 'nCall,')
- f.write( 'cumulTime,')
- f.write( 'avgTime\n\n')
- for tname,quad in TimerMap.iteritems():
- f.write('"%s",' % tname)
- f.write('%d,' % quad.callCount)
- f.write('%0.6f,' % quad.cumulTime)
- avg = quad.cumulTime/quad.callCount
- f.write('%0.6f\n' % avg)
- f.write('\n\nNote: timings may be incorrect if errors '
- 'were triggered in the timed functions')
- print 'Saved timings to file: %s' % fname
-"""
-def EstimateCumulativeBlockchainSize(blkNum):
- # I tried to make a "static" variable here so that
- # the string wouldn't be parsed on every call, but
- # I botched that, somehow.
- #
- # It doesn't *have to* be fast, but why not?
- # Oh well..
- blksizefile = """
- 0 285
- 20160 4496226
- 40320 9329049
- 60480 16637208
- 80640 31572990
- 82656 33260320
- 84672 35330575
- 86688 36815335
- 88704 38386205
- 100800 60605119
- 102816 64795352
- 104832 68697265
- 108864 79339447
- 112896 92608525
- 116928 116560952
- 120960 140607929
- 124992 170059586
- 129024 217718109
- 133056 303977266
- 137088 405836779
- 141120 500934468
- 145152 593217668
- 149184 673064617
- 153216 745173386
- 157248 816675650
- 161280 886105443
- 165312 970660768
- 169344 1058290613
- 173376 1140721593
- 177408 1240616018
- 179424 1306862029
- 181440 1463634913
- 183456 1639027360
- 185472 1868851317
- 187488 2019397056
- 189504 2173291204
- 191520 2352873908
- 193536 2530862533
- 195552 2744361593
- 197568 2936684028
- 199584 3115432617
- 201600 3282437367
- 203616 3490737816
- 205632 3669806064
- 207648 3848901149
- 209664 4064972247
- 211680 4278148686
- 213696 4557787597
- 215712 4786120879
- 217728 5111707340
- 219744 5419128115
- 221760 5733907456
- 223776 6053668460
- 225792 6407870776
- 227808 6652067986
- 228534 6778529822
- 257568 10838081536
- 259542 11106516992
- """
- strList = [line.strip().split() for line in blksizefile.strip().split('\n')]
- BLK_SIZE_LIST = [[int(x[0]), int(x[1])] for x in strList]
-
- if blkNum < BLK_SIZE_LIST[-1][0]:
- # Interpolate
- bprev,bcurr = None, None
- for i,blkpair in enumerate(BLK_SIZE_LIST):
- if blkNum < blkpair[0]:
- b0,d0 = BLK_SIZE_LIST[i-1]
- b1,d1 = blkpair
- ratio = float(blkNum-b0)/float(b1-b0)
- return int(ratio*d1 + (1-ratio)*d0)
- raise ValueError, 'Interpolation failed for %d' % blkNum
-
- else:
- bend, dend = BLK_SIZE_LIST[-1]
- bend2, dend2 = BLK_SIZE_LIST[-3]
- rate = float(dend - dend2) / float(bend - bend2) # bytes per block
- extraOnTop = (blkNum - bend) * rate
- return dend+extraOnTop
-
-
-
-
-
-
-
-
-
diff --git a/armoryengine/ALL.py b/armoryengine/ALL.py
new file mode 100644
index 000000000..07523697f
--- /dev/null
+++ b/armoryengine/ALL.py
@@ -0,0 +1,11 @@
+from armoryengine.ArmoryUtils import *
+from armoryengine.BinaryUnpacker import *
+from armoryengine.BDM import *
+from armoryengine.CoinSelection import *
+from armoryengine.Networking import *
+from armoryengine.PyBtcWallet import *
+from armoryengine.Script import *
+from SDM import *
+from armoryengine.Timer import *
+from armoryengine.Transaction import *
+
diff --git a/armoryengine/ArmoryUtils.py b/armoryengine/ArmoryUtils.py
new file mode 100644
index 000000000..08eb169db
--- /dev/null
+++ b/armoryengine/ArmoryUtils.py
@@ -0,0 +1,3231 @@
+################################################################################
+#
+# Copyright (C) 2011-2014, Armory Technologies, Inc.
+# Distributed under the GNU Affero General Public License (AGPL v3)
+# See LICENSE or http://www.gnu.org/licenses/agpl.html
+#
+################################################################################
+#
+# Project: Armory
+# Author: Alan Reiner
+# Website: www.bitcoinarmory.com
+# Orig Date: 20 November, 2011
+#
+################################################################################
+import ast
+from datetime import datetime
+import hashlib
+import inspect
+import locale
+import logging
+import math
+import multiprocessing
+import optparse
+import os
+import platform
+import random
+import signal
+from struct import pack, unpack
+#from subprocess import PIPE
+import sys
+import threading
+import time
+import traceback
+import shutil
+
+#from psutil import Popen
+import psutil
+
+from CppBlockUtils import KdfRomix, CryptoAES
+from qrcodenative import QRCode, QRErrorCorrectLevel
+
+
+# Version Numbers
+BTCARMORY_VERSION = (0, 91, 1, 0) # (Major, Minor, Bugfix, AutoIncrement)
+PYBTCWALLET_VERSION = (1, 35, 0, 0) # (Major, Minor, Bugfix, AutoIncrement)
+
+ARMORY_DONATION_ADDR = '1ArmoryXcfq7TnCSuZa9fQjRYwJ4bkRKfv'
+ARMORY_DONATION_PUBKEY = ( '04'
+ '11d14f8498d11c33d08b0cd7b312fb2e6fc9aebd479f8e9ab62b5333b2c395c5'
+ 'f7437cab5633b5894c4a5c2132716bc36b7571cbe492a7222442b75df75b9a84')
+ARMORY_INFO_SIGN_ADDR = '1NWvhByxfTXPYNT4zMBmEY3VL8QJQtQoei'
+ARMORY_INFO_SIGN_PUBLICKEY = ('04'
+ 'af4abc4b24ef57547dd13a1110e331645f2ad2b99dfe1189abb40a5b24e4ebd8'
+ 'de0c1c372cc46bbee0ce3d1d49312e416a1fa9c7bb3e32a7eb3867d1c6d1f715')
+SATOSHI_PUBLIC_KEY = ( '04'
+ 'fc9702847840aaf195de8442ebecedf5b095cdbb9bc716bda9110971b28a49e0'
+ 'ead8564ff0db22209e0374782c093bb899692d524e9d6a6956e7c5ecbcd68284')
+
+
+indent = ' '*3
+haveGUI = [False, None]
+
+parser = optparse.OptionParser(usage="%prog [options]\n")
+parser.add_option("--settings", dest="settingsPath",default='DEFAULT', type="str", help="load Armory with a specific settings file")
+parser.add_option("--datadir", dest="datadir", default='DEFAULT', type="str", help="Change the directory that Armory calls home")
+parser.add_option("--satoshi-datadir", dest="satoshiHome", default='DEFAULT', type='str', help="The Bitcoin-Qt/bitcoind home directory")
+parser.add_option("--satoshi-port", dest="satoshiPort", default='DEFAULT', type="str", help="For Bitcoin-Qt instances operating on a non-standard port")
+parser.add_option("--satoshi-rpcport", dest="satoshiRpcport",default='DEFAULT',type="str", help="RPC port Bitcoin-Qt instances operating on a non-standard port")
+#parser.add_option("--bitcoind-path", dest="bitcoindPath",default='DEFAULT', type="str", help="Path to the location of bitcoind on your system")
+parser.add_option("--dbdir", dest="leveldbDir", default='DEFAULT', type='str', help="Location to store blocks database (defaults to --datadir)")
+parser.add_option("--rpcport", dest="rpcport", default='DEFAULT', type="str", help="RPC port for running armoryd.py")
+parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the testnet protocol")
+parser.add_option("--offline", dest="offline", default=False, action="store_true", help="Force Armory to run in offline mode")
+parser.add_option("--nettimeout", dest="nettimeout", default=2, type="int", help="Timeout for detecting internet connection at startup")
+parser.add_option("--interport", dest="interport", default=-1, type="int", help="Port for inter-process communication between Armory instances")
+parser.add_option("--debug", dest="doDebug", default=False, action="store_true", help="Increase amount of debugging output")
+parser.add_option("--nologging", dest="logDisable", default=False, action="store_true", help="Disable all logging")
+parser.add_option("--netlog", dest="netlog", default=False, action="store_true", help="Log networking messages sent and received by Armory")
+parser.add_option("--logfile", dest="logFile", default='DEFAULT', type='str', help="Specify a non-default location to send logging information")
+parser.add_option("--mtdebug", dest="mtdebug", default=False, action="store_true", help="Log multi-threaded call sequences")
+parser.add_option("--skip-online-check", dest="forceOnline", default=False, action="store_true", help="Go into online mode, even if internet connection isn't detected")
+parser.add_option("--skip-version-check", dest="skipVerCheck", default=False, action="store_true", help="Do not contact bitcoinarmory.com to check for new versions")
+parser.add_option("--skip-announce-check", dest="skipAnnounceCheck", default=False, action="store_true", help="Do not query for Armory announcements")
+parser.add_option("--keypool", dest="keypool", default=100, type="int", help="Default number of addresses to lookahead in Armory wallets")
+parser.add_option("--redownload", dest="redownload", default=False, action="store_true", help="Delete Bitcoin-Qt/bitcoind databases; redownload")
+parser.add_option("--rebuild", dest="rebuild", default=False, action="store_true", help="Rebuild blockchain database and rescan")
+parser.add_option("--rescan", dest="rescan", default=False, action="store_true", help="Rescan existing blockchain DB")
+parser.add_option("--maxfiles", dest="maxOpenFiles",default=0, type="int", help="Set maximum allowed open files for LevelDB databases")
+parser.add_option("--disable-torrent", dest="disableTorrent", default=False, action="store_true", help="Only download blockchain data via P2P network (slow)")
+parser.add_option("--test-announce", dest="testAnnounceCode", default=False, action="store_true", help="Only used for developers needing to test announcement code with non-offline keys")
+#parser.add_option("--rebuildwithblocksize", dest="newBlockSize",default='32kB', type="str", help="Rebuild databases with new blocksize")
+parser.add_option("--nospendzeroconfchange",dest="ignoreAllZC",default=False, action="store_true", help="All zero-conf funds will be unspendable, including sent-to-self coins")
+parser.add_option("--force-wallet-check", dest="forceWalletCheck", default=False, action="store_true", help="Force the wallet sanity check on startup")
+
+# Pre-10.9 OS X sometimes passes a process serial number as -psn_0_xxxxxx. Nuke!
+if sys.platform == 'darwin':
+ parser.add_option('-p', '--psn')
+
+# These are arguments passed by running unit-tests that need to be handled
+parser.add_option("--port", dest="port", default=None, type="int", help="Unit Test Argument - Do not consume")
+parser.add_option("--verbosity", dest="verbosity", default=None, type="int", help="Unit Test Argument - Do not consume")
+parser.add_option("--coverage_output_dir", dest="coverageOutputDir", default=None, type="str", help="Unit Test Argument - Do not consume")
+parser.add_option("--coverage_include", dest="coverageInclude", default=None, type="str", help="Unit Test Argument - Do not consume")
+
+
+
+# Some useful constants to be used throughout everything
+BASE58CHARS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+BASE16CHARS = '0123 4567 89ab cdef'.replace(' ','')
+LITTLEENDIAN = '<';
+BIGENDIAN = '>';
+NETWORKENDIAN = '!';
+ONE_BTC = long(100000000)
+DONATION = long(5000000)
+CENT = long(1000000)
+UNINITIALIZED = None
+UNKNOWN = -2
+MIN_TX_FEE = 10000
+MIN_RELAY_TX_FEE = 10000
+MT_WAIT_TIMEOUT_SEC = 20;
+
+UINT8_MAX = 2**8-1
+UINT16_MAX = 2**16-1
+UINT32_MAX = 2**32-1
+UINT64_MAX = 2**64-1
+
+RightNow = time.time
+SECOND = 1
+MINUTE = 60
+HOUR = 3600
+DAY = 24*HOUR
+WEEK = 7*DAY
+MONTH = 30*DAY
+YEAR = 365*DAY
+
+KILOBYTE = 1024.0
+MEGABYTE = 1024*KILOBYTE
+GIGABYTE = 1024*MEGABYTE
+TERABYTE = 1024*GIGABYTE
+PETABYTE = 1024*TERABYTE
+
+# Set the default-default
+DEFAULT_DATE_FORMAT = '%Y-%b-%d %I:%M%p'
+FORMAT_SYMBOLS = [ \
+ ['%y', 'year, two digit (00-99)'], \
+ ['%Y', 'year, four digit'], \
+ ['%b', 'month name (abbrev)'], \
+ ['%B', 'month name (full)'], \
+ ['%m', 'month number (01-12)'], \
+ ['%d', 'day of month (01-31)'], \
+ ['%H', 'hour 24h (00-23)'], \
+ ['%I', 'hour 12h (01-12)'], \
+ ['%M', 'minute (00-59)'], \
+ ['%p', 'morning/night (am,pm)'], \
+ ['%a', 'day of week (abbrev)'], \
+ ['%A', 'day of week (full)'], \
+ ['%%', 'percent symbol'] ]
+
+
+class UnserializeError(Exception): pass
+class BadAddressError(Exception): pass
+class VerifyScriptError(Exception): pass
+class FileExistsError(Exception): pass
+class ECDSA_Error(Exception): pass
+class UnitializedBlockDataError(Exception): pass
+class WalletLockError(Exception): pass
+class SignatureError(Exception): pass
+class KeyDataError(Exception): pass
+class ChecksumError(Exception): pass
+class WalletAddressError(Exception): pass
+class PassphraseError(Exception): pass
+class EncryptionError(Exception): pass
+class InterruptTestError(Exception): pass
+class NetworkIDError(Exception): pass
+class WalletExistsError(Exception): pass
+class ConnectionError(Exception): pass
+class BlockchainUnavailableError(Exception): pass
+class InvalidHashError(Exception): pass
+class InvalidScriptError(Exception): pass
+class BadURIError(Exception): pass
+class CompressedKeyError(Exception): pass
+class TooMuchPrecisionError(Exception): pass
+class NegativeValueError(Exception): pass
+class FiniteFieldError(Exception): pass
+class BitcoindError(Exception): pass
+class ShouldNotGetHereError(Exception): pass
+class BadInputError(Exception): pass
+class TxdpError(Exception): pass
+class P2SHNotSupportedError(Exception): pass
+
+# Get the host operating system
+opsys = platform.system()
+OS_WINDOWS = 'win32' in opsys.lower() or 'windows' in opsys.lower()
+OS_LINUX = 'nix' in opsys.lower() or 'nux' in opsys.lower()
+OS_MACOSX = 'darwin' in opsys.lower() or 'osx' in opsys.lower()
+
+
+if getattr(sys, 'frozen', False):
+ sys.argv = [arg.decode('utf8') for arg in sys.argv]
+
+CLI_OPTIONS = None
+CLI_ARGS = None
+(CLI_OPTIONS, CLI_ARGS) = parser.parse_args()
+
+
+# This is probably an abuse of the CLI_OPTIONS structure, but not
+# automatically expanding "~" symbols is killing me
+for opt,val in CLI_OPTIONS.__dict__.iteritems():
+ if not isinstance(val, basestring) or not val.startswith('~'):
+ continue
+
+ if os.path.exists(os.path.expanduser(val)):
+ CLI_OPTIONS.__dict__[opt] = os.path.expanduser(val)
+ else:
+ # If the path doesn't exist, it still won't exist when we don't
+ # modify it, and I'd like to modify as few vars as possible
+ pass
+
+
+# Use CLI args to determine testnet or not
+USE_TESTNET = CLI_OPTIONS.testnet
+
+# Set default port for inter-process communication
+if CLI_OPTIONS.interport < 0:
+ CLI_OPTIONS.interport = 8223 + (1 if USE_TESTNET else 0)
+
+
+# Pass this bool to all getSpendable* methods, and it will consider
+# all zero-conf UTXOs as unspendable, including sent-to-self (change)
+IGNOREZC = CLI_OPTIONS.ignoreAllZC
+
+
+# Figure out the default directories for Satoshi client, and BitcoinArmory
+OS_NAME = ''
+OS_VARIANT = ''
+USER_HOME_DIR = ''
+BTC_HOME_DIR = ''
+ARMORY_HOME_DIR = ''
+LEVELDB_DIR = ''
+SUBDIR = 'testnet3' if USE_TESTNET else ''
+if OS_WINDOWS:
+ OS_NAME = 'Windows'
+ OS_VARIANT = platform.win32_ver()
+ USER_HOME_DIR = os.getenv('APPDATA')
+ BTC_HOME_DIR = os.path.join(USER_HOME_DIR, 'Bitcoin', SUBDIR)
+ ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, 'Armory', SUBDIR)
+ BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
+ BLKFILE_1stFILE = os.path.join(BLKFILE_DIR, 'blk00000.dat')
+elif OS_LINUX:
+ OS_NAME = 'Linux'
+ OS_VARIANT = platform.linux_distribution()
+ USER_HOME_DIR = os.getenv('HOME')
+ BTC_HOME_DIR = os.path.join(USER_HOME_DIR, '.bitcoin', SUBDIR)
+ ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, '.armory', SUBDIR)
+ BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
+ BLKFILE_1stFILE = os.path.join(BLKFILE_DIR, 'blk00000.dat')
+elif OS_MACOSX:
+ platform.mac_ver()
+ OS_NAME = 'MacOSX'
+ OS_VARIANT = platform.mac_ver()
+ USER_HOME_DIR = os.path.expanduser('~/Library/Application Support')
+ BTC_HOME_DIR = os.path.join(USER_HOME_DIR, 'Bitcoin', SUBDIR)
+ ARMORY_HOME_DIR = os.path.join(USER_HOME_DIR, 'Armory', SUBDIR)
+ BLKFILE_DIR = os.path.join(BTC_HOME_DIR, 'blocks')
+ BLKFILE_1stFILE = os.path.join(BLKFILE_DIR, 'blk00000.dat')
+else:
+ print '***Unknown operating system!'
+ print '***Cannot determine default directory locations'
+
+
+
+
+# Get the host operating system
+opsys = platform.system()
+OS_WINDOWS = 'win32' in opsys.lower() or 'windows' in opsys.lower()
+OS_LINUX = 'nix' in opsys.lower() or 'nux' in opsys.lower()
+OS_MACOSX = 'darwin' in opsys.lower() or 'osx' in opsys.lower()
+
+BLOCKCHAINS = {}
+BLOCKCHAINS['\xf9\xbe\xb4\xd9'] = "Main Network"
+BLOCKCHAINS['\xfa\xbf\xb5\xda'] = "Old Test Network"
+BLOCKCHAINS['\x0b\x11\x09\x07'] = "Test Network (testnet3)"
+
+NETWORKS = {}
+NETWORKS['\x00'] = "Main Network"
+NETWORKS['\x05'] = "Main Network"
+NETWORKS['\x6f'] = "Test Network"
+NETWORKS['\xc4'] = "Test Network"
+NETWORKS['\x34'] = "Namecoin Network"
+
+
+# We disable wallet checks on ARM for the sake of resources (unless forced)
+DO_WALLET_CHECK = CLI_OPTIONS.forceWalletCheck or \
+ not platform.machine().lower().startswith('arm')
+
+# Version Handling Code
+def getVersionString(vquad, numPieces=4):
+ vstr = '%d.%02d' % vquad[:2]
+ if (vquad[2] > 0 or vquad[3] > 0) and numPieces>2:
+ vstr += '.%d' % vquad[2]
+ if vquad[3] > 0 and numPieces>3:
+ vstr += '.%d' % vquad[3]
+ return vstr
+
+def getVersionInt(vquad, numPieces=4):
+ vint = int(vquad[0] * 1e7)
+ vint += int(vquad[1] * 1e5)
+ if numPieces>2:
+ vint += int(vquad[2] * 1e3)
+ if numPieces>3:
+ vint += int(vquad[3])
+ return vint
+
+def readVersionString(verStr):
+ verList = [int(piece) for piece in verStr.split('.')]
+ while len(verList)<4:
+ verList.append(0)
+ return tuple(verList)
+
+def readVersionInt(verInt):
+ verStr = str(verInt).rjust(10,'0')
+ verList = []
+ verList.append( int(verStr[ -3:]) )
+ verList.append( int(verStr[ -5:-3 ]) )
+ verList.append( int(verStr[ -7:-5 ]) )
+ verList.append( int(verStr[:-7 ]) )
+ return tuple(verList[::-1])
+# Allow user to override default bitcoin-qt/bitcoind home directory
+if not CLI_OPTIONS.satoshiHome.lower()=='default':
+ success = True
+ if USE_TESTNET:
+ testnetTry = os.path.join(CLI_OPTIONS.satoshiHome, 'testnet3')
+ if os.path.exists(testnetTry):
+ CLI_OPTIONS.satoshiHome = testnetTry
+
+ if not os.path.exists(CLI_OPTIONS.satoshiHome):
+ print 'Directory "%s" does not exist! Using default!' % \
+ CLI_OPTIONS.satoshiHome
+ else:
+ BTC_HOME_DIR = CLI_OPTIONS.satoshiHome
+
+
+
+
+
+# Allow user to override default Armory home directory
+if not CLI_OPTIONS.datadir.lower()=='default':
+ if not os.path.exists(CLI_OPTIONS.datadir):
+ print 'Directory "%s" does not exist! Using default!' % \
+ CLI_OPTIONS.datadir
+ else:
+ ARMORY_HOME_DIR = CLI_OPTIONS.datadir
+
+# Same for the directory that holds the LevelDB databases
+LEVELDB_DIR = os.path.join(ARMORY_HOME_DIR, 'databases')
+
+if not CLI_OPTIONS.leveldbDir.lower()=='default':
+ if not os.path.exists(CLI_OPTIONS.leveldbDir):
+ print 'Directory "%s" does not exist! Using default!' % \
+ CLI_OPTIONS.leveldbDir
+ os.makedirs(CLI_OPTIONS.leveldbDir)
+ else:
+ LEVELDB_DIR = CLI_OPTIONS.leveldbDir
+
+
+# Change the log file to use
+ARMORY_LOG_FILE = os.path.join(ARMORY_HOME_DIR, 'armorylog.txt')
+ARMCPP_LOG_FILE = os.path.join(ARMORY_HOME_DIR, 'armorycpplog.txt')
+if not sys.argv[0] in ['ArmoryQt.py', 'ArmoryQt.exe', 'Armory.exe']:
+ basename = os.path.basename(sys.argv[0])
+ CLI_OPTIONS.logFile = os.path.join(ARMORY_HOME_DIR, '%s.log.txt' % basename)
+
+
+# Change the settings file to use
+if CLI_OPTIONS.settingsPath.lower()=='default':
+ CLI_OPTIONS.settingsPath = os.path.join(ARMORY_HOME_DIR, 'ArmorySettings.txt')
+
+# Change the log file to use
+if CLI_OPTIONS.logFile.lower()=='default':
+ if sys.argv[0] in ['ArmoryQt.py', 'ArmoryQt.exe', 'Armory.exe']:
+ CLI_OPTIONS.logFile = os.path.join(ARMORY_HOME_DIR, 'armorylog.txt')
+ else:
+ basename = os.path.basename(sys.argv[0])
+ CLI_OPTIONS.logFile = os.path.join(ARMORY_HOME_DIR, '%s.log.txt' % basename)
+
+
+SETTINGS_PATH = CLI_OPTIONS.settingsPath
+MULT_LOG_FILE = os.path.join(ARMORY_HOME_DIR, 'multipliers.txt')
+
+
+# If this is the first Armory has been run, create directories
+if ARMORY_HOME_DIR and not os.path.exists(ARMORY_HOME_DIR):
+ os.makedirs(ARMORY_HOME_DIR)
+
+
+if not os.path.exists(LEVELDB_DIR):
+ os.makedirs(LEVELDB_DIR)
+
+##### MAIN NETWORK IS DEFAULT #####
+if not USE_TESTNET:
+ # TODO: The testnet genesis tx hash can't be the same...?
+ BITCOIN_PORT = 8333
+ BITCOIN_RPC_PORT = 8332
+ ARMORY_RPC_PORT = 8225
+ MAGIC_BYTES = '\xf9\xbe\xb4\xd9'
+ GENESIS_BLOCK_HASH_HEX = '6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000'
+ GENESIS_BLOCK_HASH = 'o\xe2\x8c\n\xb6\xf1\xb3r\xc1\xa6\xa2F\xaec\xf7O\x93\x1e\x83e\xe1Z\x08\x9ch\xd6\x19\x00\x00\x00\x00\x00'
+ GENESIS_TX_HASH_HEX = '3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a'
+ GENESIS_TX_HASH = ';\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J'
+ ADDRBYTE = '\x00'
+ P2SHBYTE = '\x05'
+ PRIVKEYBYTE = '\x80'
+else:
+ BITCOIN_PORT = 18333
+ BITCOIN_RPC_PORT = 18332
+ ARMORY_RPC_PORT = 18225
+ MAGIC_BYTES = '\x0b\x11\x09\x07'
+ GENESIS_BLOCK_HASH_HEX = '43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000'
+ GENESIS_BLOCK_HASH = 'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00'
+ GENESIS_TX_HASH_HEX = '3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a'
+ GENESIS_TX_HASH = ';\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J'
+ ADDRBYTE = '\x6f'
+ P2SHBYTE = '\xc4'
+ PRIVKEYBYTE = '\xef'
+
+# These are the same regardless of network
+# They are the way data is stored in the database which is network agnostic
+SCRADDR_P2PKH_BYTE = '\x00'
+SCRADDR_P2SH_BYTE = '\x05'
+SCRADDR_MULTISIG_BYTE = '\xfe'
+SCRADDR_NONSTD_BYTE = '\xff'
+SCRADDR_BYTE_LIST = [SCRADDR_P2PKH_BYTE, \
+ SCRADDR_P2SH_BYTE, \
+ SCRADDR_MULTISIG_BYTE, \
+ SCRADDR_NONSTD_BYTE]
+
+# Copied from cppForSwig/BtcUtils.h::getTxOutScriptTypeInt(script)
+CPP_TXOUT_STDHASH160 = 0
+CPP_TXOUT_STDPUBKEY65 = 1
+CPP_TXOUT_STDPUBKEY33 = 2
+CPP_TXOUT_MULTISIG = 3
+CPP_TXOUT_P2SH = 4
+CPP_TXOUT_NONSTANDARD = 5
+CPP_TXOUT_HAS_ADDRSTR = [CPP_TXOUT_STDHASH160, \
+ CPP_TXOUT_STDPUBKEY65,
+ CPP_TXOUT_STDPUBKEY33,
+ CPP_TXOUT_P2SH]
+CPP_TXOUT_STDSINGLESIG = [CPP_TXOUT_STDHASH160, \
+ CPP_TXOUT_STDPUBKEY65,
+ CPP_TXOUT_STDPUBKEY33]
+
+CPP_TXOUT_SCRIPT_NAMES = ['']*6
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_STDHASH160] = 'Standard (PKH)'
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_STDPUBKEY65] = 'Standard (PK65)'
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_STDPUBKEY33] = 'Standard (PK33)'
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_MULTISIG] = 'Multi-Signature'
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_P2SH] = 'Standard (P2SH)'
+CPP_TXOUT_SCRIPT_NAMES[CPP_TXOUT_NONSTANDARD] = 'Non-Standard'
+
+# Copied from cppForSwig/BtcUtils.h::getTxInScriptTypeInt(script)
+CPP_TXIN_STDUNCOMPR = 0
+CPP_TXIN_STDCOMPR = 1
+CPP_TXIN_COINBASE = 2
+CPP_TXIN_SPENDPUBKEY = 3
+CPP_TXIN_SPENDMULTI = 4
+CPP_TXIN_SPENDP2SH = 5
+CPP_TXIN_NONSTANDARD = 6
+
+CPP_TXIN_SCRIPT_NAMES = ['']*7
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_STDUNCOMPR] = 'Sig + PubKey65'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_STDCOMPR] = 'Sig + PubKey33'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_COINBASE] = 'Coinbase'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_SPENDPUBKEY] = 'Plain Signature'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_SPENDMULTI] = 'Spend Multisig'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_SPENDP2SH] = 'Spend P2SH'
+CPP_TXIN_SCRIPT_NAMES[CPP_TXIN_NONSTANDARD] = 'Non-Standard'
+
+
+################################################################################
+if not CLI_OPTIONS.satoshiPort == 'DEFAULT':
+ try:
+ BITCOIN_PORT = int(CLI_OPTIONS.satoshiPort)
+ except:
+ raise TypeError('Invalid port for Bitcoin-Qt, using ' + str(BITCOIN_PORT))
+
+if not CLI_OPTIONS.satoshiRpcport == 'DEFAULT':
+ try:
+ BITCOIN_RPC_PORT = int(CLI_OPTIONS.satoshiRpcport)
+ except:
+ raise TypeError('Invalid rpc port for Bitcoin-Qt, using ' + str(BITCOIN_RPC_PORT))
+
+if not CLI_OPTIONS.rpcport == 'DEFAULT':
+ try:
+ ARMORY_RPC_PORT = int(CLI_OPTIONS.rpcport)
+ except:
+ raise TypeError('Invalid RPC port for armoryd ' + str(ARMORY_RPC_PORT))
+
+
+
+if sys.argv[0]=='ArmoryQt.py':
+ print '********************************************************************************'
+ print 'Loading Armory Engine:'
+ print ' Armory Version: ', getVersionString(BTCARMORY_VERSION)
+ print ' PyBtcWallet Version:', getVersionString(PYBTCWALLET_VERSION)
+ print 'Detected Operating system:', OS_NAME
+ print ' OS Variant :', OS_VARIANT
+ print ' User home-directory :', USER_HOME_DIR
+ print ' Satoshi BTC directory :', BTC_HOME_DIR
+ print ' Armory home dir :', ARMORY_HOME_DIR
+ print ' LevelDB directory :', LEVELDB_DIR
+ print ' Armory settings file :', SETTINGS_PATH
+ print ' Armory log file :', ARMORY_LOG_FILE
+ print ' Do wallet checking :', DO_WALLET_CHECK
+
+
+
+################################################################################
+def launchProcess(cmd, useStartInfo=True, *args, **kwargs):
+ LOGINFO('Executing popen: %s', str(cmd))
+ if not OS_WINDOWS:
+ from subprocess import Popen, PIPE
+ return Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, *args, **kwargs)
+ else:
+ from subprocess_win import Popen, PIPE, STARTUPINFO, STARTF_USESHOWWINDOW
+
+ if useStartInfo:
+ startinfo = STARTUPINFO()
+ startinfo.dwFlags |= STARTF_USESHOWWINDOW
+ return Popen(cmd, \
+ *args, \
+ stdin=PIPE, \
+ stdout=PIPE, \
+ stderr=PIPE, \
+ startupinfo=startinfo, \
+ **kwargs)
+ else:
+ return Popen(cmd, \
+ *args, \
+ stdin=PIPE, \
+ stdout=PIPE, \
+ stderr=PIPE, \
+ **kwargs)
+
+
+################################################################################
+def killProcess(pid, sig='default'):
+ # I had to do this, because killing a process in Windows has issues
+ # when using py2exe (yes, os.kill does not work, for the same reason
+ # I had to pass stdin/stdout/stderr everywhere...
+ LOGWARN('Killing process pid=%d', pid)
+ if not OS_WINDOWS:
+ import os
+ sig = signal.SIGKILL if sig=='default' else sig
+ os.kill(pid, sig)
+ else:
+ import sys, os.path, ctypes, ctypes.wintypes
+ k32 = ctypes.WinDLL('kernel32.dll')
+ k32.OpenProcess.restype = ctypes.wintypes.HANDLE
+ k32.TerminateProcess.restype = ctypes.wintypes.BOOL
+ hProcess = k32.OpenProcess(1, False, pid)
+ k32.TerminateProcess(hProcess, 1)
+ k32.CloseHandle(hProcess)
+
+
+
+################################################################################
+def subprocess_check_output(*popenargs, **kwargs):
+ """
+ Run command with arguments and return its output as a byte string.
+ Backported from Python 2.7, because it's stupid useful, short, and
+ won't exist on systems using Python 2.6 or earlier
+ """
+ from subprocess import CalledProcessError
+ process = launchProcess(*popenargs, **kwargs)
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ error = CalledProcessError(retcode, cmd)
+ error.output = output
+ raise error
+ return output
+
+
+################################################################################
+def killProcessTree(pid):
+ # In this case, Windows is easier because we know it has the get_children
+ # call, because have bundled a recent version of psutil. Linux, however,
+ # does not have that function call in earlier versions.
+ from subprocess import Popen, PIPE
+ if not OS_LINUX:
+ for child in psutil.Process(pid).get_children():
+ killProcess(child.pid)
+ else:
+ proc = Popen("ps -o pid --ppid %d --noheaders" % pid, shell=True, stdout=PIPE)
+ out,err = proc.communicate()
+ for pid_str in out.split("\n")[:-1]:
+ killProcess(int(pid_str))
+
+
+################################################################################
+# Similar to subprocess_check_output, but used for long-running commands
+def execAndWait(cli_str, timeout=0, useStartInfo=True):
+ """
+ There may actually still be references to this function where check_output
+ would've been more appropriate. But I didn't know about check_output at
+ the time...
+ """
+
+ process = launchProcess(cli_str, shell=True, useStartInfo=useStartInfo)
+ pid = process.pid
+ start = RightNow()
+ while process.poll() == None:
+ time.sleep(0.1)
+ if timeout>0 and (RightNow() - start)>timeout:
+ print 'Process exceeded timeout, killing it'
+ killProcess(pid)
+ out,err = process.communicate()
+ return [out,err]
+
+
+
+
+######### INITIALIZE LOGGING UTILITIES ##########
+#
+# Setup logging to write INFO+ to file, and WARNING+ to console
+# In debug mode, will write DEBUG+ to file and INFO+ to console
+#
+
+# Want to get the line in which an error was triggered, but by wrapping
+# the logger function (as I will below), the displayed "file:linenum"
+# references the logger function, not the function that called it.
+# So I use traceback to find the file and line number two up in the
+# stack trace, and return that to be displayed instead of default
+# [Is this a hack? Yes and no. I see no other way to do this]
+def getCallerLine():
+ stkTwoUp = traceback.extract_stack()[-3]
+ filename,method = stkTwoUp[0], stkTwoUp[1]
+ return '%s:%d' % (os.path.basename(filename),method)
+
+# When there's an error in the logging function, it's impossible to find!
+# These wrappers will print the full stack so that it's possible to find
+# which line triggered the error
+def LOGDEBUG(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.debug(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+
+def LOGINFO(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.info(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+def LOGWARN(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.warn(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+def LOGERROR(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.error(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+def LOGCRIT(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.critical(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+def LOGEXCEPT(msg, *a):
+ try:
+ logstr = msg if len(a)==0 else (msg%a)
+ callerStr = getCallerLine() + ' - '
+ logging.exception(callerStr + logstr)
+ except TypeError:
+ traceback.print_stack()
+ raise
+
+
+
+DEFAULT_CONSOLE_LOGTHRESH = logging.WARNING
+DEFAULT_FILE_LOGTHRESH = logging.INFO
+
+DEFAULT_PPRINT_LOGLEVEL = logging.DEBUG
+DEFAULT_RAWDATA_LOGLEVEL = logging.DEBUG
+
+rootLogger = logging.getLogger('')
+if CLI_OPTIONS.doDebug or CLI_OPTIONS.netlog or CLI_OPTIONS.mtdebug:
+ # Drop it all one level: console will see INFO, file will see DEBUG
+ DEFAULT_CONSOLE_LOGTHRESH -= 20
+ DEFAULT_FILE_LOGTHRESH -= 20
+
+
+def chopLogFile(filename, size):
+ if not os.path.exists(filename):
+ print 'Log file doesn\'t exist [yet]'
+ return
+
+ logfile = open(filename, 'r')
+ allLines = logfile.readlines()
+ logfile.close()
+
+ nBytes,nLines = 0,0;
+ for line in allLines[::-1]:
+ nBytes += len(line)
+ nLines += 1
+ if nBytes>size:
+ break
+
+ logfile = open(filename, 'w')
+ for line in allLines[-nLines:]:
+ logfile.write(line)
+ logfile.close()
+
+
+# Cut down the log file to just the most recent 1 MB
+chopLogFile(ARMORY_LOG_FILE, 1024*1024)
+
+
+# Now set loglevels
+DateFormat = '%Y-%m-%d %H:%M'
+logging.getLogger('').setLevel(logging.DEBUG)
+fileFormatter = logging.Formatter('%(asctime)s (%(levelname)s) -- %(message)s', \
+ datefmt=DateFormat)
+fileHandler = logging.FileHandler(ARMORY_LOG_FILE)
+fileHandler.setLevel(DEFAULT_FILE_LOGTHRESH)
+fileHandler.setFormatter(fileFormatter)
+logging.getLogger('').addHandler(fileHandler)
+
+consoleFormatter = logging.Formatter('(%(levelname)s) %(message)s')
+consoleHandler = logging.StreamHandler()
+consoleHandler.setLevel(DEFAULT_CONSOLE_LOGTHRESH)
+consoleHandler.setFormatter( consoleFormatter )
+logging.getLogger('').addHandler(consoleHandler)
+
+
+
+class stringAggregator(object):
+ def __init__(self):
+ self.theStr = ''
+ def getStr(self):
+ return self.theStr
+ def write(self, theStr):
+ self.theStr += theStr
+
+
+# A method to redirect pprint() calls to the log file
+# Need a way to take a pprint-able object, and redirect its output to file
+# Do this by swapping out sys.stdout temporarily, execute theObj.pprint()
+# then set sys.stdout back to the original.
+def LOGPPRINT(theObj, loglevel=DEFAULT_PPRINT_LOGLEVEL):
+ sys.stdout = stringAggregator()
+ theObj.pprint()
+ printedStr = sys.stdout.getStr()
+ sys.stdout = sys.__stdout__
+ stkOneUp = traceback.extract_stack()[-2]
+ filename,method = stkOneUp[0], stkOneUp[1]
+ methodStr = '(PPRINT from %s:%d)\n' % (filename,method)
+ logging.log(loglevel, methodStr + printedStr)
+
+# For super-debug mode, we'll write out raw data
+def LOGRAWDATA(rawStr, loglevel=DEFAULT_RAWDATA_LOGLEVEL):
+ dtype = isLikelyDataType(rawStr)
+ stkOneUp = traceback.extract_stack()[-2]
+ filename,method = stkOneUp[0], stkOneUp[1]
+ methodStr = '(PPRINT from %s:%d)\n' % (filename,method)
+ pstr = rawStr[:]
+ if dtype==DATATYPE.Binary:
+ pstr = binary_to_hex(rawStr)
+ pstr = prettyHex(pstr, indent=' ', withAddr=False)
+ elif dtype==DATATYPE.Hex:
+ pstr = prettyHex(pstr, indent=' ', withAddr=False)
+ else:
+ pstr = ' ' + '\n '.join(pstr.split('\n'))
+
+ logging.log(loglevel, methodStr + pstr)
+
+
+cpplogfile = None
+if CLI_OPTIONS.logDisable:
+ print 'Logging is disabled'
+ rootLogger.disabled = True
+
+
+
+def logexcept_override(type, value, tback):
+ import traceback
+ import logging
+ strList = traceback.format_exception(type,value,tback)
+ logging.error(''.join([s for s in strList]))
+ # then call the default handler
+ sys.__excepthook__(type, value, tback)
+
+sys.excepthook = logexcept_override
+
+
+# If there is a rebuild or rescan flag, let's do the right thing.
+# Precedence is redownload > rebuild > rescan (if/elif below); the stronger
+# action also clears the weaker flags so they don't fire on the next start.
+fileRedownload = os.path.join(ARMORY_HOME_DIR, 'redownload.flag')
+fileRebuild = os.path.join(ARMORY_HOME_DIR, 'rebuild.flag')
+fileRescan = os.path.join(ARMORY_HOME_DIR, 'rescan.flag')
+fileDelSettings = os.path.join(ARMORY_HOME_DIR, 'delsettings.flag')
+
+# Flag to remove everything in Bitcoin dir except wallet.dat (if requested)
+if os.path.exists(fileRedownload):
+   # Flag to remove *BITCOIN-QT* databases so it will have to re-download
+   LOGINFO('Found %s, will delete Bitcoin DBs & redownload' % fileRedownload)
+
+   os.remove(fileRedownload)
+
+   if os.path.exists(fileRebuild):
+      os.remove(fileRebuild)
+
+   if os.path.exists(fileRescan):
+      os.remove(fileRescan)
+
+   # Redownload implies Armory's own databases must be rebuilt as well
+   CLI_OPTIONS.redownload = True
+   CLI_OPTIONS.rebuild = True
+
+elif os.path.exists(fileRebuild):
+   # Flag to remove Armory databases so it will have to rebuild
+   LOGINFO('Found %s, will destroy and rebuild databases' % fileRebuild)
+   os.remove(fileRebuild)
+
+   if os.path.exists(fileRescan):
+      os.remove(fileRescan)
+
+   CLI_OPTIONS.rebuild = True
+elif os.path.exists(fileRescan):
+   LOGINFO('Found %s, will throw out saved history, rescan' % fileRescan)
+   os.remove(fileRescan)
+   if os.path.exists(fileRebuild):
+      os.remove(fileRebuild)
+   CLI_OPTIONS.rescan = True
+
+
+# Separately, we may want to delete the settings file, which couldn't
+# be done easily from the GUI, because it frequently gets rewritten to
+# file before shutdown is complete. The best way is to delete it on start.
+if os.path.exists(fileDelSettings):
+   os.remove(SETTINGS_PATH)
+   os.remove(fileDelSettings)
+
+
+################################################################################
+def deleteBitcoindDBs():
+ if not os.path.exists(BTC_HOME_DIR):
+ LOGERROR('Could not find Bitcoin-Qt/bitcoind home dir to remove blk data')
+ LOGERROR(' Does not exist: %s' % BTC_HOME_DIR)
+ else:
+ LOGINFO('Found bitcoin home dir, removing blocks and databases')
+
+ # Remove directories
+ for btcDir in ['blocks', 'chainstate', 'database']:
+ fullPath = os.path.join(BTC_HOME_DIR, btcDir)
+ if os.path.exists(fullPath):
+ LOGINFO(' Removing dir: %s' % fullPath)
+ shutil.rmtree(fullPath)
+
+ # Remove files
+ for btcFile in ['DB_CONFIG', 'db.log', 'debug.log', 'peers.dat']:
+ fullPath = os.path.join(BTC_HOME_DIR, btcFile)
+ if os.path.exists(fullPath):
+ LOGINFO(' Removing file: %s' % fullPath)
+ os.remove(fullPath)
+
+
+
+#####
+# Act on the redownload flag decided above: wipe the Bitcoin Core data
+if CLI_OPTIONS.redownload:
+   deleteBitcoindDBs()
+   if os.path.exists(fileRedownload):
+      os.remove(fileRedownload)
+
+
+#####
+# Act on the rebuild flag: wipe Armory's own leveldb databases
+if CLI_OPTIONS.rebuild and os.path.exists(LEVELDB_DIR):
+   LOGINFO('Found existing databases dir; removing before rebuild')
+   shutil.rmtree(LEVELDB_DIR)
+   os.mkdir(LEVELDB_DIR)
+
+
+####
+if CLI_OPTIONS.testAnnounceCode:
+ LOGERROR('*'*60)
+ LOGERROR('You are currently using a developer mode intended for ')
+ LOGERROR('to help with testing of announcements, which is considered')
+ LOGERROR('a security risk. ')
+ LOGERROR('*'*60)
+ ARMORY_INFO_SIGN_ADDR = '1PpAJyNoocJt38Vcf4AfPffaxo76D4AAEe'
+ ARMORY_INFO_SIGN_PUBLICKEY = ('04'
+ '601c891a2cbc14a7b2bb1ecc9b6e42e166639ea4c2790703f8e2ed126fce432c'
+ '62fe30376497ad3efcd2964aa0be366010c11b8d7fc8209f586eac00bb763015')
+
+
+
+################################################################################
+# Load the C++ utilities here
+#
+# The SWIG/C++ block utilities give us access to the blockchain, fast ECDSA
+# operations, and general encryption/secure-binary containers
+################################################################################
+try:
+   import CppBlockUtils as Cpp
+   from CppBlockUtils import CryptoECDSA, SecureBinaryData
+   LOGINFO('C++ block utilities loaded successfully')
+except:
+   # Import failure is fatal; log actionable hints, then re-raise
+   LOGCRIT('C++ block utilities not available.')
+   LOGCRIT('   Make sure that you have the SWIG-compiled modules')
+   LOGCRIT('   in the current directory (or added to the PATH)')
+   LOGCRIT('   Specifically, you need:')
+   LOGCRIT('       CppBlockUtils.py and')
+   if OS_LINUX or OS_MACOSX:
+      LOGCRIT('       _CppBlockUtils.so')
+   elif OS_WINDOWS:
+      LOGCRIT('       _CppBlockUtils.pyd')
+   else:
+      LOGCRIT('\n\n... UNKNOWN operating system')
+   raise
+
+################################################################################
+# Get system details for logging purposes
+# DumbStruct: bare attribute bag used for the returned specs
+class DumbStruct(object): pass
+def GetSystemDetails():
+   """
+   Collect RAM, CPU string, core count, 64-bit-ness and free disk space.
+   Memory ends up in GiB after the final division below; HddAvail* in GiB.
+   Raises OSError on unrecognized platforms.
+   """
+
+   out = DumbStruct()
+
+   # NOTE(review): CPU/COR/X64/MEM and sysParam are unused legacy locals
+   CPU,COR,X64,MEM = range(4)
+   sysParam = [None,None,None,None]
+   out.CpuStr = 'UNKNOWN'
+   out.Machine = platform.machine().lower()
+   if OS_LINUX:
+      # Get total RAM ('free -m' column 1 is MiB; *1024 gives KiB)
+      freeStr = subprocess_check_output('free -m', shell=True)
+      totalMemory = freeStr.split('\n')[1].split()[1]
+      out.Memory = int(totalMemory) * 1024
+
+      # Get CPU name from the first 'model name' line of /proc/cpuinfo
+      out.CpuStr = 'Unknown'
+      cpuinfo = subprocess_check_output(['cat','/proc/cpuinfo'])
+      for line in cpuinfo.split('\n'):
+         if line.strip().lower().startswith('model name'):
+            out.CpuStr = line.split(':')[1].strip()
+            break
+
+
+   elif OS_WINDOWS:
+      import ctypes
+      # Mirrors the Win32 MEMORYSTATUSEX struct for GlobalMemoryStatusEx
+      class MEMORYSTATUSEX(ctypes.Structure):
+         _fields_ = [
+            ("dwLength", ctypes.c_ulong),
+            ("dwMemoryLoad", ctypes.c_ulong),
+            ("ullTotalPhys", ctypes.c_ulonglong),
+            ("ullAvailPhys", ctypes.c_ulonglong),
+            ("ullTotalPageFile", ctypes.c_ulonglong),
+            ("ullAvailPageFile", ctypes.c_ulonglong),
+            ("ullTotalVirtual", ctypes.c_ulonglong),
+            ("ullAvailVirtual", ctypes.c_ulonglong),
+            ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
+         ]
+         def __init__(self):
+            # have to initialize this to the size of MEMORYSTATUSEX
+            self.dwLength = ctypes.sizeof(self)
+            super(MEMORYSTATUSEX, self).__init__()
+
+      stat = MEMORYSTATUSEX()
+      ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
+      # bytes -> KiB (float)
+      out.Memory = stat.ullTotalPhys/1024.
+      out.CpuStr = platform.processor()
+   elif OS_MACOSX:
+      # sysctl hw.memsize reports bytes; /1024 gives KiB
+      memsizeStr = subprocess_check_output('sysctl hw.memsize', shell=True)
+      out.Memory = int(memsizeStr.split(": ")[1]) / 1024
+      out.CpuStr = subprocess_check_output('sysctl -n machdep.cpu.brand_string', shell=True)
+   else:
+      out.CpuStr = 'Unknown'
+      raise OSError("Can't get system specs in: %s" % platform.system())
+
+   out.NumCores = multiprocessing.cpu_count()
+   out.IsX64 = platform.machine().lower() == 'x86_64'
+   # KiB -> GiB
+   out.Memory = out.Memory / (1024*1024.)
+
+   def getHddSize(adir):
+      # On Windows, ctypes was imported in the branch above
+      if OS_WINDOWS:
+         free_bytes = ctypes.c_ulonglong(0)
+         ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(adir), \
+                                                    None, None, \
+                                                    ctypes.pointer(free_bytes))
+         return free_bytes.value
+      else:
+         s = os.statvfs(adir)
+         return s.f_bavail * s.f_frsize
+   out.HddAvailA = getHddSize(ARMORY_HOME_DIR) / (1024**3)
+   out.HddAvailB = getHddSize(BTC_HOME_DIR)    / (1024**3)
+   return out
+
+SystemSpecs = None
+try:
+   SystemSpecs = GetSystemDetails()
+except:
+   # Detection is best-effort: fall back to -1/'Unknown' sentinels so the
+   # logging banner below still works on exotic platforms
+   LOGEXCEPT('Error getting system details:')
+   LOGERROR('Skipping.')
+   SystemSpecs = DumbStruct()
+   SystemSpecs.Memory = -1
+   SystemSpecs.CpuStr = 'Unknown'
+   SystemSpecs.NumCores = -1
+   SystemSpecs.IsX64 = 'Unknown'
+   SystemSpecs.Machine = platform.machine().lower()
+   SystemSpecs.HddAvailA = -1
+   SystemSpecs.HddAvailB = -1
+
+
+# Startup banner: record invocation, versions, environment and options so
+# every log file is self-describing for bug reports.
+LOGINFO('')
+LOGINFO('')
+LOGINFO('')
+LOGINFO('************************************************************')
+LOGINFO('Invoked: ' + ' '.join(sys.argv))
+LOGINFO('************************************************************')
+LOGINFO('Loading Armory Engine:')
+LOGINFO('   Armory Version        : ' + getVersionString(BTCARMORY_VERSION))
+LOGINFO('   PyBtcWallet  Version  : ' + getVersionString(PYBTCWALLET_VERSION))
+LOGINFO('Detected Operating system: ' + OS_NAME)
+LOGINFO('   OS Variant            : ' + (OS_VARIANT[0] if OS_MACOSX else '-'.join(OS_VARIANT)))
+LOGINFO('   User home-directory   : ' + USER_HOME_DIR)
+LOGINFO('   Satoshi BTC directory : ' + BTC_HOME_DIR)
+LOGINFO('   Armory home dir       : ' + ARMORY_HOME_DIR)
+LOGINFO('Detected System Specs    : ')
+LOGINFO('   Total Available RAM   : %0.2f GB', SystemSpecs.Memory)
+LOGINFO('   CPU ID string         : ' + SystemSpecs.CpuStr)
+LOGINFO('   Number of CPU cores   : %d cores', SystemSpecs.NumCores)
+LOGINFO('   System is 64-bit      : ' + str(SystemSpecs.IsX64))
+LOGINFO('   Preferred Encoding    : ' + locale.getpreferredencoding())
+LOGINFO('   Machine Arch          : ' + SystemSpecs.Machine)
+LOGINFO('   Available HDD (ARM)   : %d GB' % SystemSpecs.HddAvailA)
+LOGINFO('   Available HDD (BTC)   : %d GB' % SystemSpecs.HddAvailB)
+LOGINFO('')
+LOGINFO('Network Name: ' + NETWORKS[ADDRBYTE])
+LOGINFO('Satoshi Port: %d', BITCOIN_PORT)
+LOGINFO('Do wlt check: %s', str(DO_WALLET_CHECK))
+LOGINFO('Named options/arguments to armoryengine.py:')
+# str(CLI_OPTIONS) renders optparse's Values as a dict literal -- presumably
+# safe to literal_eval; confirm if optparse repr ever changes
+for key,val in ast.literal_eval(str(CLI_OPTIONS)).iteritems():
+   LOGINFO('    %-16s: %s', key,val)
+LOGINFO('Other arguments:')
+for val in CLI_ARGS:
+   LOGINFO('    %s', val)
+LOGINFO('************************************************************')
+
+
+def GetExecDir():
+ """
+ Return the path from where armoryengine was imported. Inspect method
+ expects a function or module name, it can actually inspect its own
+ name...
+ """
+ srcfile = inspect.getsourcefile(GetExecDir)
+ srcpath = os.path.dirname(srcfile)
+ srcpath = os.path.abspath(srcpath)
+ return srcpath
+
+
+
+
+def coin2str(nSatoshi, ndec=8, rJust=True, maxZeros=8):
+   """
+   Converts a raw value (1e-8 BTC) into a formatted string for display
+
+   ndec guarantees that we get at least N decimal places in our result
+
+   maxZeros means we will replace zeros with spaces up to M decimal places
+   in order to declutter the amount field
+
+   """
+
+   nBtc = float(nSatoshi) / float(ONE_BTC)
+   s = ('%%0.%df' % ndec) % nBtc
+   s = s.rjust(18, ' ')
+
+   # Blank out up to maxChop trailing zero-decimals to declutter display
+   if maxZeros < ndec:
+      maxChop = ndec - maxZeros
+      nChop = min(len(s) - len(str(s.strip('0'))), maxChop)
+      if nChop>0:
+         s = s[:-nChop] + nChop*' '
+
+   # NOTE(review): s.lstrip() discards its result -- this line is a no-op.
+   # Probably meant 's = s.lstrip()'; left unchanged to preserve the
+   # alignment callers currently rely on.
+   if nSatoshi < 10000*ONE_BTC:
+      s.lstrip()
+
+   if not rJust:
+      s = s.strip(' ')
+
+   # Drop a dangling decimal point left over after zero-blanking
+   s = s.replace('. ', '')
+
+   return s
+
+
+def coin2strNZ(nSatoshi):
+   """ Right-justified, minimum zeros, but with padding for alignment"""
+   return coin2str(nSatoshi, 8, True, 0)
+
+def coin2strNZS(nSatoshi):
+   """ Right-justified, minimum zeros, stripped """
+   return coin2str(nSatoshi, 8, True, 0).strip()
+
+def coin2str_approx(nSatoshi, sigfig=3):
+ posVal = nSatoshi
+ isNeg = False
+ if nSatoshi<0:
+ isNeg = True
+ posVal *= -1
+
+ nDig = max(round(math.log(posVal+1, 10)-0.5), 0)
+ nChop = max(nDig-2, 0 )
+ approxVal = round((10**nChop) * round(posVal / (10**nChop)))
+ return coin2str( (-1 if isNeg else 1)*approxVal, maxZeros=0)
+
+
+def str2coin(theStr, negAllowed=True, maxDec=8, roundHighPrec=True):
+   """
+   Parse a user-entered BTC amount string into an integer satoshi value.
+   Raises ValueError on empty input, NegativeValueError if negative and
+   negAllowed is False, TooMuchPrecisionError if more than maxDec decimals
+   are given and roundHighPrec is False (otherwise extra digits are rounded).
+   """
+   coinStr = str(theStr)
+   if len(coinStr.strip())==0:
+      raise ValueError
+
+   isNeg = ('-' in coinStr)
+   coinStrPos = coinStr.replace('-','')
+   if not '.' in coinStrPos:
+      if not negAllowed and isNeg:
+         raise NegativeValueError
+      return (int(coinStrPos)*ONE_BTC)*(-1 if isNeg else 1)
+   else:
+      lhs,rhs = coinStrPos.strip().split('.')
+      if len(lhs.strip('-'))==0:
+         lhs='0'
+      if len(rhs)>maxDec and not roundHighPrec:
+         raise TooMuchPrecisionError
+      if not negAllowed and isNeg:
+         raise NegativeValueError
+      # Pad/truncate to 9 decimal digits, then '+5)/10' rounds half-up to
+      # 8 decimals; digits beyond the 9th are truncated, not rounded
+      fullInt = (int(lhs + rhs[:9].ljust(9,'0')) + 5) / 10
+      return fullInt*(-1 if isNeg else 1)
+
+
+
+################################################################################
+def replacePlurals(txt, *args):
+   """
+   Use this like regular string formatting, but with pairs of strings:
+
+      replacePlurals("I have @{one cat|%d cats}@. @{It is|They are}@ cute!", nCat)
+
+   Then you can supply a single number which will select all supplied pairs.
+    or one number per @{|}@ object. If you use with format
+   strings (such as above, with "%d") make sure to replace those strings FIRST,
+   then call this function. Otherwise the %d will disappear depending on the
+   plurality and cause an error. Hence why I made the function below:
+   formatWithPlurals
+   """
+   if len(args)==0:
+      if ('@{' in txt) and ('}@' in txt):
+         raise IndexError('Not enough arguments for plural formatting')
+      return txt
+
+   # Reversed so pop() yields the arguments in original order
+   argList = list(args[::-1])
+   n = argList[0]
+   nRepl = 0
+   while '@{' in txt:
+      idx0 = txt.find('@{')
+      # find() returns -1 when '}@' is missing, making idx1 == 1 below
+      idx1 = txt.find('}@')+2
+      sep = txt.find('|', idx0)
+      if idx1==1 or sep==-1:
+         raise TypeError('Invalid replacement format')
+
+      strOne = txt[idx0+2:sep]
+      strMany = txt[sep+1:idx1-2]
+      strReplace = txt[idx0:idx1]
+
+      # With exactly one argument, reuse it for every @{|}@ pair
+      if not len(args) == 1:
+         try:
+            n = argList.pop()
+         except IndexError:
+            raise IndexError('Not enough arguments for plural formatting')
+
+      txt = txt.replace(strReplace, strOne if n==1 else strMany)
+      nRepl += 1
+
+   if (len(args)>1 and len(argList)>0) or (nRepl < len(args)):
+      raise TypeError('Too many arguments supplied for plural formatting')
+
+   return txt
+
+
+
+################################################################################
+def formatWithPlurals(txt, replList=None, pluralList=None):
+ """
+ Where you would normally supply X extra arguments for either regular string
+ formatting or the plural function, you will instead supply a X-element list
+ for each one (actually, the two lists are likely to be different sizes).
+ """
+ # Do the string formatting/replacement first, since the post-pluralized
+ # string may remove some of the replacement objects (i.e. if you have
+ # "The @{cat|%d cats}@ danced", the %d won't be there if the singular
+ # is chosen and replaced before applying the string formatting objects).
+ if replList is not None:
+ if not isinstance(replList, (list,tuple)):
+ replList = [replList]
+ txt = txt % tuple(replList)
+
+ if pluralList is not None:
+ if not isinstance(pluralList, (list,tuple)):
+ pluralList = [pluralList]
+ txt = replacePlurals(txt, *pluralList)
+
+ return txt
+
+
+################################################################################
+# A bunch of convenience methods for converting between:
+# -- Raw binary scripts (as seen in the blockchain)
+# -- Address strings (exchanged between people for paying each other)
+# -- ScrAddr strings (A unique identifier used by the DB)
+################################################################################
+
+################################################################################
+# Convert a 20-byte hash to a "pay-to-public-key-hash" script to be inserted
+# into a TxOut script
+def hash160_to_p2pkhash_script(binStr20):
+ if not len(binStr20)==20:
+ raise InvalidHashError('Tried to convert non-20-byte str to p2pkh script')
+
+ from Transaction import getOpCode
+ outScript = ''.join([ getOpCode('OP_DUP' ), \
+ getOpCode('OP_HASH160' ), \
+ '\x14', \
+ binStr20,
+ getOpCode('OP_EQUALVERIFY'), \
+ getOpCode('OP_CHECKSIG' )])
+ return outScript
+
+
+################################################################################
+# Convert a 20-byte hash to a "pay-to-script-hash" script to be inserted
+# into a TxOut script
+def hash160_to_p2sh_script(binStr20):
+ if not len(binStr20)==20:
+ raise InvalidHashError('Tried to convert non-20-byte str to p2sh script')
+
+ from Transaction import getOpCode
+ outScript = ''.join([ getOpCode('OP_HASH160'), \
+ '\x14', \
+ binStr20,
+ getOpCode('OP_EQUAL')])
+ return outScript
+
+################################################################################
+# Convert an arbitrary script into a P2SH script
+def script_to_p2sh_script(binScript):
+   """Return the P2SH locking script committing to hash160(binScript)."""
+   scriptHash = hash160(binScript)
+   return hash160_to_p2sh_script(scriptHash)
+
+
+################################################################################
+# Convert a 33-byte or 65-byte hash to a "pay-to-pubkey" script to be inserted
+# into a TxOut script
+def pubkey_to_p2pk_script(binStr33or65):
+
+ if not len(binStr33or65) in [33, 65]:
+ raise KeyDataError('Invalid public key supplied to p2pk script')
+
+ from Transaction import getOpCode
+ lenByte = int_to_binary(len(binStr33or65), widthBytes=1)
+ outScript = ''.join([ lenByte,
+ binStr33or65,
+ getOpCode('OP_CHECKSIG')])
+ return outScript
+
+
+################################################################################
+# Convert a list of public keys to an OP_CHECKMULTISIG script. There will be
+# use cases where we require the keys to be sorted lexicographically, so we
+# will do that by default. If you require a different order, pre-sort them
+# and pass withSort=False.
+#
+# NOTE: About the hardcoded bytes in here:
+# I made a mistake when making the databases, and hardcoded the
+# mainnet addrByte and P2SH bytes into DB format. This means that
+# that any ScrAddr object will use the mainnet prefix bytes, despite
+# being in testnet. I will at some point fix this.
+def pubkeylist_to_multisig_script(pkList, M, withSort=True):
+
+ if sum([ (0 if len(pk) in [33,65] else 1) for pk in pkList]) > 0:
+ raise KeyDataError('Not all strings in pkList are 33 or 65 bytes!')
+
+ from Transaction import getOpCode
+ opM = getOpCode('OP_%d' % M)
+ opN = getOpCode('OP_%d' % len(pkList))
+
+ newPkList = pkList[:] # copy
+ if withSort:
+ newPkList = sorted(pkList)
+
+ outScript = opM
+ for pk in newPkList:
+ outScript += int_to_binary(len(pk), widthBytes=1)
+ outScript += pk
+ outScript += opN
+ outScript += getOpCode('OP_CHECKMULTISIG')
+
+ return outScript
+
+################################################################################
+def scrAddr_to_script(scraddr):
+   """ Convert a scrAddr string (used by BDM) to the correct TxOut script """
+   if len(scraddr)==0:
+      raise BadAddressError('Empty scraddr')
+
+   # First byte identifies the script type; remaining 20 bytes are the hash
+   prefix = scraddr[0]
+   if not prefix in SCRADDR_BYTE_LIST or not len(scraddr)==21:
+      LOGERROR('Bad scraddr: "%s"' % binary_to_hex(scraddr))
+      raise BadAddressError('Invalid ScrAddress')
+
+   if prefix==SCRADDR_P2PKH_BYTE:
+      return hash160_to_p2pkhash_script(scraddr[1:])
+   elif prefix==SCRADDR_P2SH_BYTE:
+      return hash160_to_p2sh_script(scraddr[1:])
+   else:
+      LOGERROR('Unsupported scraddr type: "%s"' % binary_to_hex(scraddr))
+      raise BadAddressError('Can only convert P2PKH and P2SH scripts')
+
+
+################################################################################
+def script_to_scrAddr(binScript):
+   """ Convert a binary script to scrAddr string (used by BDM) """
+   return Cpp.BtcUtils().getScrAddrForScript(binScript)
+
+################################################################################
+def script_to_addrStr(binScript):
+   """ Convert a binary script to a base58 address string """
+   return scrAddr_to_addrStr(script_to_scrAddr(binScript))
+
+################################################################################
+def scrAddr_to_addrStr(scrAddr):
+   """Convert a 21-byte scrAddr to a base58 address string (P2PKH or P2SH)."""
+   if len(scrAddr)==0:
+      raise BadAddressError('Empty scrAddr')
+
+   prefix = scrAddr[0]
+   if not prefix in SCRADDR_BYTE_LIST or not len(scrAddr)==21:
+      LOGERROR('Bad scrAddr: "%s"' % binary_to_hex(scrAddr))
+      raise BadAddressError('Invalid ScrAddress')
+
+   if prefix==SCRADDR_P2PKH_BYTE:
+      return hash160_to_addrStr(scrAddr[1:])
+   elif prefix==SCRADDR_P2SH_BYTE:
+      return hash160_to_p2shStr(scrAddr[1:])
+   else:
+      LOGERROR('Unsupported scrAddr type: "%s"' % binary_to_hex(scrAddr))
+      raise BadAddressError('Can only convert P2PKH and P2SH scripts')
+
+################################################################################
+# We beat around the bush here, to make sure it goes through addrStr which
+# triggers errors if this isn't a regular addr or P2SH addr
+def scrAddr_to_hash160(scrAddr):
+ addr = scrAddr_to_addrStr(scrAddr)
+ atype, a160 = addrStr_to_hash160(addr)
+ return (atype, a160)
+
+
+################################################################################
+def addrStr_to_scrAddr(addrStr):
+ if not checkAddrStrValid(addrStr):
+ BadAddressError('Invalid address: "%s"' % addrStr)
+
+ # Okay this doesn't work because of the issue outlined before, where the
+ # SCRADDR prefixes don't match the ADDRSTR prefixes. Whoops
+ #return addrBin[:21]
+
+ atype, a160 = addrStr_to_hash160(addrStr)
+ if atype==ADDRBYTE:
+ return SCRADDR_P2PKH_BYTE + a160
+ elif atype==P2SHBYTE:
+ return SCRADDR_P2SH_BYTE + a160
+ else:
+ BadAddressError('Invalid address: "%s"' % addrStr)
+
+
+
+
+
+################################################################################
+# Load the C++ utilities here
+#
+# The SWIG/C++ block utilities give us access to the blockchain, fast ECDSA
+# operations, and general encryption/secure-binary containers
+################################################################################
+# NOTE(review): this try/except is an exact duplicate of the loader earlier
+# in this file.  Python caches module imports, so it is harmless, but it
+# could be removed in a cleanup pass.
+try:
+   import CppBlockUtils as Cpp
+   from CppBlockUtils import CryptoECDSA, SecureBinaryData
+   LOGINFO('C++ block utilities loaded successfully')
+except:
+   LOGCRIT('C++ block utilities not available.')
+   LOGCRIT('   Make sure that you have the SWIG-compiled modules')
+   LOGCRIT('   in the current directory (or added to the PATH)')
+   LOGCRIT('   Specifically, you need:')
+   LOGCRIT('       CppBlockUtils.py and')
+   if OS_LINUX or OS_MACOSX:
+      LOGCRIT('       _CppBlockUtils.so')
+   elif OS_WINDOWS:
+      LOGCRIT('       _CppBlockUtils.pyd')
+   else:
+      LOGCRIT('\n\n... UNKNOWN operating system')
+   raise
+
+
+################################################################################
+# We need to have some methods for casting ASCII<->Unicode<->Preferred
+DEFAULT_ENCODING = 'utf-8'
+
+def isASCII(theStr):
+ try:
+ theStr.decode('ascii')
+ return True
+ except UnicodeEncodeError:
+ return False
+ except UnicodeDecodeError:
+ return False
+ except:
+ LOGEXCEPT('What was passed to this function? %s', theStr)
+ return False
+
+
+def toBytes(theStr, theEncoding=DEFAULT_ENCODING):
+ if isinstance(theStr, unicode):
+ return theStr.encode(theEncoding)
+ elif isinstance(theStr, str):
+ return theStr
+ else:
+ LOGERROR('toBytes() not been defined for input: %s', str(type(theStr)))
+
+def toUnicode(theStr, theEncoding=DEFAULT_ENCODING):
+ if isinstance(theStr, unicode):
+ return theStr
+ elif isinstance(theStr, str):
+ return unicode(theStr, theEncoding)
+ else:
+ LOGERROR('toUnicode() not been defined for input: %s', str(type(theStr)))
+
+
+def toPreferred(theStr):
+   """Encode theStr for the local platform's preferred byte encoding."""
+   # NOTE(review): Windows uses utf-8 rather than locale.getpreferredencoding()
+   # -- presumably deliberate; confirm before changing
+   if OS_WINDOWS:
+      return theStr.encode('utf-8')
+   else:
+      return toUnicode(theStr).encode(locale.getpreferredencoding())
+
+
+def lenBytes(theStr, theEncoding=DEFAULT_ENCODING):
+   """Length of theStr in encoded bytes (not characters)."""
+   return len(toBytes(theStr, theEncoding))
+
+# Stolen from stackoverflow (google "stackoverflow 1809531")
+def unicode_truncate(theStr, length, encoding='utf-8'):
+   # Truncates by *bytes*; 'ignore' silently drops a partially-cut final char
+   encoded = theStr.encode(encoding)[:length]
+   return encoded.decode(encoding, 'ignore')
+
+################################################################################
+
+
+
+# This is a sweet trick for create enum-like dictionaries.
+# Either automatically numbers (*args), or name-val pairs (**kwargs)
+#http://stackoverflow.com/questions/36932/whats-the-best-way-to-implement-an-enum-in-python
+def enum(*sequential, **named):
+ enums = dict(zip(sequential, range(len(sequential))), **named)
+ return type('Enum', (), enums)
+
+DATATYPE = enum("Binary", 'Base58', 'Hex')
+def isLikelyDataType(theStr, dtype=None):
+ """
+ This really shouldn't be used on short strings. Hence
+ why it's called "likely" datatype...
+ """
+ ret = None
+ hexCount = sum([1 if c in BASE16CHARS else 0 for c in theStr])
+ b58Count = sum([1 if c in BASE58CHARS else 0 for c in theStr])
+ canBeHex = hexCount==len(theStr)
+ canBeB58 = b58Count==len(theStr)
+ if canBeHex:
+ ret = DATATYPE.Hex
+ elif canBeB58 and not canBeHex:
+ ret = DATATYPE.Base58
+ else:
+ ret = DATATYPE.Binary
+
+ if dtype==None:
+ return ret
+ else:
+ return dtype==ret
+
+# NOTE(review): duplicate of the identical cpplogfile/logDisable block earlier
+# in this file; harmless but removable.
+cpplogfile = None
+if CLI_OPTIONS.logDisable:
+   print 'Logging is disabled'
+   rootLogger.disabled = True
+
+
+
+
+
+# The database uses prefixes to identify type of address. Until the new
+# wallet format is created that supports more than just hash160 addresses
+# we have to explicitly add the prefix to any hash160 values that are being
+# sent to any of the C++ utilities. For instance, the BlockDataManager (BDM)
+# (C++ stuff) tracks regular hash160 addresses, P2SH, multisig, and all
+# non-standard scripts. Any such "scrAddrs" (script-addresses) will eventually
+# be valid entities for tracking in a wallet. Until then, all of our python
+# utilities all use just hash160 values, and we manually add the prefix
+# before talking to the BDM.
+HASH160PREFIX = '\x00'   # regular pay-to-pubkey-hash
+P2SHPREFIX = '\x05'      # pay-to-script-hash
+MSIGPREFIX = '\xfe'      # multisig
+NONSTDPREFIX = '\xff'    # any non-standard script
+def CheckHash160(scrAddr):
+ if not len(scrAddr)==21:
+ raise BadAddressError("Supplied scrAddr is not a Hash160 value!")
+ if not scrAddr[0] == HASH160PREFIX:
+ raise BadAddressError("Supplied scrAddr is not a Hash160 value!")
+ return scrAddr[1:]
+
+def Hash160ToScrAddr(a160):
+   """Prefix a 20-byte hash160 into a P2PKH scrAddr."""
+   # NOTE: bad length is only logged -- the prefixed value is still returned
+   if not len(a160)==20:
+      LOGERROR('Invalid hash160 value!')
+   return HASH160PREFIX + a160
+
+def HexHash160ToScrAddr(a160):
+   """Same as Hash160ToScrAddr but accepts the hash160 as 40 hex chars."""
+   if not len(a160)==40:
+      LOGERROR('Invalid hash160 value!')
+   return HASH160PREFIX + hex_to_binary(a160)
+
+# Some more constants that are needed to play nice with the C++ utilities
+# (values mirror the C++ database build/prune modes -- keep in sync)
+ARMORY_DB_BARE = 0
+ARMORY_DB_LITE = 1
+ARMORY_DB_PARTIAL = 2
+ARMORY_DB_FULL = 3
+ARMORY_DB_SUPER = 4
+DB_PRUNE_ALL = 0
+DB_PRUNE_NONE = 1
+
+
+# Some time methods (RightNow() return local unix timestamp)
+RightNow = time.time
+def RightNowUTC():
+   # Re-interprets the current UTC wall-clock through mktime(), i.e. the
+   # epoch value a local clock would show for the current UTC time
+   return time.mktime(time.gmtime(RightNow()))
+
+def RightNowStr(fmt=DEFAULT_DATE_FORMAT):
+   """Current local time formatted with fmt."""
+   return unixTimeToFormatStr(RightNow(), fmt)
+
+
+# Define all the hashing functions we're going to need.  We don't actually
+# use any of the first three directly (sha1, sha256, ripemd160), we only
+# use hash256 and hash160 which use the first three to create the ONLY hash
+# operations we ever do in the bitcoin network
+# UPDATE:  mini-private-key format requires vanilla sha256...
+def sha1(bits):
+   return hashlib.new('sha1', bits).digest()
+def sha256(bits):
+   return hashlib.new('sha256', bits).digest()
+def sha512(bits):
+   return hashlib.new('sha512', bits).digest()
+def ripemd160(bits):
+   # It turns out that not all python has ripemd160...?
+   #return hashlib.new('ripemd160', bits).digest()
+   return Cpp.BtcUtils().ripemd160_SWIG(bits)
+def hash256(s):
+   """ Double-SHA256 """
+   return sha256(sha256(s))
+def hash160(s):
+   """ RIPEMD160( SHA256( binaryStr ) ) """
+   return Cpp.BtcUtils().getHash160_SWIG(s)
+
+
+def HMAC(key, msg, hashfunc=sha512, hashsz=None):
+   """ This is intended to be simple, not fast.  For speed, use HDWalletCrypto() """
+   # NOTE(review): the key is hashed/padded to the *digest* size, not the
+   # hash's internal block size as RFC 2104 specifies -- this is nonstandard
+   # HMAC.  Do NOT "fix" it: wallet data has been derived with this scheme.
+   hashsz = len(hashfunc('')) if hashsz==None else hashsz
+   key = (hashfunc(key) if len(key)>hashsz else key)
+   key = key.ljust(hashsz, '\x00')
+   okey = ''.join([chr(ord('\x5c')^ord(c)) for c in key])
+   ikey = ''.join([chr(ord('\x36')^ord(c)) for c in key])
+   return hashfunc( okey + hashfunc(ikey + msg) )
+
+HMAC256 = lambda key,msg: HMAC(key, msg, sha256, 32)
+HMAC512 = lambda key,msg: HMAC(key, msg, sha512, 64)
+
+
+################################################################################
+def prettyHex(theStr, indent='', withAddr=True, major=8, minor=8):
+   """
+   This is the same as pprintHex(), but returns the string instead of
+   printing it to console.  This is useful for redirecting output to
+   files, or doing further modifications to the data before display
+   """
+   outStr = ''
+   sz = len(theStr)
+   # Number of `minor`-char chunks, rounded up
+   nchunk = int((sz-1)/minor) + 1;
+   for i in range(nchunk):
+      # Start a new row every `major` chunks
+      if i%major==0:
+         outStr += '\n' + indent
+         if withAddr:
+            # Byte offset of this row (2 hex chars per byte), big-endian
+            locStr = int_to_hex(i*minor/2, widthBytes=2, endOut=BIGENDIAN)
+            outStr += '0x' + locStr + ':  '
+      outStr += theStr[i*minor:(i+1)*minor] + ' '
+   return outStr
+
+
+
+
+
+################################################################################
+def pprintHex(theStr, indent='', withAddr=True, major=8, minor=8):
+   """
+   This method takes in a long hex string and prints it out into rows
+   of 64 hex chars, in chunks of 8 hex characters, and with address
+   markings on each row.  This means that each row displays 32 bytes,
+   which is usually pleasant.
+
+   The format is customizable: you can adjust the indenting of the
+   entire block, remove address markings, or change the major/minor
+   grouping size (major * minor = hexCharsPerRow)
+   """
+   print prettyHex(theStr, indent, withAddr, major, minor)
+
+
+
+def pprintDiff(str1, str2, indent=''):
+ if not len(str1)==len(str2):
+ print 'pprintDiff: Strings are different length!'
+ return
+
+ byteDiff = []
+ for i in range(len(str1)):
+ if str1[i]==str2[i]:
+ byteDiff.append('-')
+ else:
+ byteDiff.append('X')
+
+ pprintHex(''.join(byteDiff), indent=indent)
+
+
+
+
+##### Switch endian-ness #####
+def hex_switchEndian(s):
+ """ Switches the endianness of a hex string (in pairs of hex chars) """
+ pairList = [s[i]+s[i+1] for i in xrange(0,len(s),2)]
+ return ''.join(pairList[::-1])
+def binary_switchEndian(s):
+ """ Switches the endianness of a binary string """
+ return s[::-1]
+
+
+##### INT/HEXSTR #####
+def int_to_hex(i, widthBytes=0, endOut=LITTLEENDIAN):
+   """
+   Convert an integer (int() or long()) to hexadecimal.  Default behavior is
+   to use the smallest even number of hex characters necessary, and using
+   little-endian.   Use the widthBytes argument to add 0-padding where needed
+   if you are expecting constant-length output.
+   """
+   h = hex(i)[2:]
+   # Python 2 long repr ends with 'L' -- strip it
+   if isinstance(i,long):
+      h = h[:-1]
+   # Pad to whole bytes (even number of hex chars)
+   if len(h)%2 == 1:
+      h = '0'+h
+   if not widthBytes==0:
+      nZero = 2*widthBytes - len(h)
+      if nZero > 0:
+         h = '0'*nZero + h
+   if endOut==LITTLEENDIAN:
+      h = hex_switchEndian(h)
+   return h
+
+
+def hex_to_int(h, endIn=LITTLEENDIAN):
+ """
+ Convert hex-string to integer (or long). Default behavior is to interpret
+ hex string as little-endian
+ """
+ hstr = h.replace(' ','') # copies data, no references
+ if endIn==LITTLEENDIAN:
+ hstr = hex_switchEndian(hstr)
+ return( int(hstr, 16) )
+
+
+##### HEXSTR/BINARYSTR #####
+def hex_to_binary(h, endIn=LITTLEENDIAN, endOut=LITTLEENDIAN):
+ """
+ Converts hexadecimal to binary (in a python string). Endianness is
+ only switched if (endIn != endOut)
+ """
+ bout = h.replace(' ','') # copies data, no references
+ if not endIn==endOut:
+ bout = hex_switchEndian(bout)
+ return bout.decode('hex_codec')
+
+
+def binary_to_hex(b, endOut=LITTLEENDIAN, endIn=LITTLEENDIAN):
+ """
+ Converts binary to hexadecimal. Endianness is only switched
+ if (endIn != endOut)
+ """
+ hout = b.encode('hex_codec')
+ if not endOut==endIn:
+ hout = hex_switchEndian(hout)
+ return hout
+
+##### Shorthand combo of prettyHex and binary_to_hex intended for use in debugging
+def ph(binaryInput):
+   return prettyHex(binary_to_hex(binaryInput))
+
+##### INT/BINARYSTR #####
+def int_to_binary(i, widthBytes=0, endOut=LITTLEENDIAN):
+   """
+   Convert integer to binary.  Default behavior is use as few bytes
+   as necessary, and to use little-endian.  This can be changed with
+   the two optional input arguemnts.
+   """
+   # Round-trips through the hex representation
+   h = int_to_hex(i,widthBytes)
+   return hex_to_binary(h, endOut=endOut)
+
+def binary_to_int(b, endIn=LITTLEENDIAN):
+   """
+   Converts binary to integer (or long).  Interpret as LE by default
+   """
+   h = binary_to_hex(b, endIn, LITTLEENDIAN)
+   return hex_to_int(h)
+
+##### INT/BITS #####
+
+def int_to_bitset(i, widthBytes=0):
+ bitsOut = []
+ while i>0:
+ i,r = divmod(i,2)
+ bitsOut.append(['0','1'][r])
+ result = ''.join(bitsOut)
+ if widthBytes != 0:
+ result = result.ljust(widthBytes*8,'0')
+ return result
+
+def bitset_to_int(bitset):
+ n = 0
+ for i,bit in enumerate(bitset):
+ n += (0 if bit=='0' else 1) * 2**i
+ return n
+
+
+
+EmptyHash = hex_to_binary('00'*32)
+
+
+################################################################################
+# BINARY/BASE58 CONVERSIONS
+def binary_to_base58(binstr):
+   """
+   This method applies the Bitcoin-specific conversion from binary to Base58
+   which may includes some extra "zero" bytes, such as is the case with the
+   main-network addresses.
+
+   This method is labeled as outputting an "addrStr", but it's really this
+   special kind of Base58 converter, which makes it usable for encoding other
+   data, such as ECDSA keys or scripts.
+   """
+   # Leading zero bytes carry no numeric value, so count them separately;
+   # each becomes a literal '1' character in the output (Base58Check rule)
+   padding = 0;
+   for b in binstr:
+      if b=='\x00':
+         padding+=1
+      else:
+         break
+
+   # Interpret the whole string as one big-endian integer
+   n = 0
+   for ch in binstr:
+      n *= 256
+      n += ord(ch)
+
+   b58 = ''
+   while n > 0:
+      n, r = divmod (n, 58)
+      b58 = BASE58CHARS[r] + b58
+   return '1'*padding + b58
+
+
+################################################################################
+def base58_to_binary(addr):
+   """
+   This method applies the Bitcoin-specific conversion from Base58 to binary
+   which may includes some extra "zero" bytes, such as is the case with the
+   main-network addresses.
+
+   This method is labeled as inputting an "addrStr", but it's really this
+   special kind of Base58 converter, which makes it usable for encoding other
+   data, such as ECDSA keys or scripts.
+   """
+   # Count the zeros ('1' characters) at the beginning
+   padding = 0;
+   for c in addr:
+      if c=='1':
+         padding+=1
+      else:
+         break
+
+   # Accumulate the base-58 digits into one integer
+   # (invalid characters raise ValueError via BASE58CHARS.index)
+   n = 0
+   for ch in addr:
+      n *= 58
+      n += BASE58CHARS.index(ch)
+
+   # Serialize the integer back out big-endian, then restore leading zeros
+   binOut = ''
+   while n>0:
+      d,m = divmod(n,256)
+      binOut = chr(m) + binOut
+      n = d
+   return '\x00'*padding + binOut
+
+
+
+################################################################################
+def hash160_to_addrStr(binStr, netbyte=ADDRBYTE):
+   """
+   Converts the 20-byte pubKeyHash to 25-byte binary Bitcoin address
+   which includes the network byte (prefix) and 4-byte checksum (suffix),
+   then encodes the result as a base58 string.
+   """
+
+   if not len(binStr) == 20:
+      raise InvalidHashError('Input string is %d bytes' % len(binStr))
+
+   # prefix + hash160, then append first 4 bytes of double-SHA256 as checksum
+   addr21 = netbyte + binStr
+   addr25 = addr21 + hash256(addr21)[:4]
+   return binary_to_base58(addr25);
+
################################################################################
def hash160_to_p2shStr(binStr):
   """ Convert a 20-byte script hash into a Base58 P2SH address string. """
   if len(binStr) != 20:
      raise InvalidHashError('Input string is %d bytes' % len(binStr))

   withPrefix = P2SHBYTE + binStr
   checksum = hash256(withPrefix)[:4]
   return binary_to_base58(withPrefix + checksum)
+
################################################################################
def addrStr_is_p2sh(b58Str):
   """ Return True if the Base58 string is a well-formed P2SH address. """
   decoded = base58_to_binary(b58Str)
   if len(decoded) != 25:
      return False

   # The final 4 bytes must be the hash256 checksum of the first 21
   if hash256(decoded[:21])[:4] != decoded[-4:]:
      return False

   return decoded[0] == P2SHBYTE
+
################################################################################
# As of version 0.90.1, this returns the prefix byte with the hash160.  This is
# because we need to handle/distinguish regular addresses from P2SH.  All code
# using this method must be updated to expect 2 outputs and check the prefix.
def addrStr_to_hash160(b58Str, p2shAllowed=True):
   """
   Decode a Base58 address and return (prefixByte, hash160).

   Raises P2SHNotSupportedError for P2SH when p2shAllowed is False,
   BadAddressError on bad length or unknown prefix, and ChecksumError
   on a checksum mismatch.
   """
   decoded = base58_to_binary(b58Str)

   if decoded[0] == P2SHBYTE and not p2shAllowed:
      raise P2SHNotSupportedError
   if len(decoded) != 25:
      raise BadAddressError('Address string is %d bytes' % len(decoded))
   if hash256(decoded[:21])[:4] != decoded[-4:]:
      raise ChecksumError('Address string has invalid checksum')
   if decoded[0] not in (ADDRBYTE, P2SHBYTE):
      raise BadAddressError('Unknown addr prefix: %s' % binary_to_hex(decoded[0]))

   return (decoded[0], decoded[1:-4])
+
+
+###### Typing-friendly Base16 #####
+# Implements "hexadecimal" encoding but using only easy-to-type
+# characters in the alphabet. Hex usually includes the digits 0-9
+# which can be slow to type, even for good typists. On the other
+# hand, by changing the alphabet to common, easily distinguishable,
+# lowercase characters, typing such strings will become dramatically
+# faster. Additionally, some default encodings of QRCodes do not
+# preserve the capitalization of the letters, meaning that Base58
# is not a feasible option
+
# Easy-type Base16 alphabet: remap the 16 hex digits onto letters chosen to
# be fast to type and visually unambiguous.  The spaces in the literals are
# only visual grouping and are stripped immediately.
NORMALCHARS = '0123 4567 89ab cdef'.replace(' ','')
EASY16CHARS = 'asdf ghjk wert uion'.replace(' ','')
# Forward and reverse lookup tables between hex digits and easy-type chars
hex_to_base16_map = {}
base16_to_hex_map = {}
for n,b in zip(NORMALCHARS,EASY16CHARS):
   hex_to_base16_map[n] = b
   base16_to_hex_map[b] = n
+
def binary_to_easyType16(binstr):
   """ Hex-encode a binary string, then remap each digit to the easy alphabet. """
   hexDigits = binary_to_hex(binstr)
   return ''.join(hex_to_base16_map[d] for d in hexDigits)
+
# Treat unrecognized characters as 0, to facilitate possibly later recovery of
# their correct values from the checksum.
def easyType16_to_binary(b16str):
   """ Map easy-alphabet chars back to hex ('0' for unknowns), then decode. """
   hexDigits = [base16_to_hex_map.get(c, '0') for c in b16str]
   return hex_to_binary(''.join(hexDigits))
+
+
def makeSixteenBytesEasy(b16):
   """
   Render exactly 16 bytes as easy-type Base16 with a 2-byte checksum,
   formatted as nine 4-character quads separated by single spaces.
   """
   if len(b16) != 16:
      raise ValueError('Must supply 16-byte input')

   withChk = b16 + computeChecksum(b16, nBytes=2)
   encoded = binary_to_easyType16(withChk)   # 18 bytes -> 36 easy-hex chars
   quads = [encoded[i:i+4] for i in range(0, 36, 4)]
   return ' '.join(quads)
+
def readSixteenEasyBytes(et18):
   """
   Parse user-entered easy-type Base16 (16 data bytes + 2 checksum bytes).

   Returns (data16, errStr): errStr is None on success, 'No_Checksum' if
   the checksum was absent, 'Fixed_1' if a one-byte error was repaired, or
   'Error_2+' (with empty data) if the input is unrecoverable.
   """
   raw = easyType16_to_binary(et18.strip().replace(' ',''))
   if len(raw) != 18:
      raise ValueError('Must supply 18-byte input')

   data, chk = raw[:16], raw[16:]
   if chk == '':
      LOGWARN('Missing checksum when reading EasyType')
      return (data, 'No_Checksum')

   fixed = verifyChecksum(data, chk)
   if len(fixed) == 0:
      return ('', 'Error_2+')
   if fixed != data:
      return (fixed, 'Fixed_1')
   return (fixed, None)
+
##### FLOAT/BTC #####
# https://en.bitcoin.it/wiki/Proper_Money_Handling_(JSON-RPC)
# Conversions between integer base units (scaled by ONE_BTC) and
# float/string BTC amounts.
# Integer divmod keeps full precision -- no floats involved here
def ubtc_to_floatStr(n):
   return '%d.%08d' % divmod (n, ONE_BTC)
# Parse a decimal string into integer base units (goes through float; rounds)
def floatStr_to_ubtc(s):
   return long(round(float(s) * ONE_BTC))
# Convert a float amount into integer base units (rounds)
def float_to_btc (f):
   return long (round(f * ONE_BTC))
+
+
##### And a few useful utilities #####
def unixTimeToFormatStr(unixTime, formatStr=DEFAULT_DATE_FORMAT):
   """
   Converts a unix time (like those found in block headers) to a
   pleasant, human-readable format
   """
   dtobj = datetime.fromtimestamp(unixTime)
   # strftime returns a byte string (Py2); decode as UTF-8 then force back
   # to ASCII ('?' for non-ASCII) so callers always receive a plain str
   dtstr = u'' + dtobj.strftime(formatStr).decode('utf-8')
   dtstr = dtstr.encode('ascii', errors='replace')
   # Lower-case only the last two chars (the AM/PM suffix, if present)
   return dtstr[:-2] + dtstr[-2:].lower()
+
def secondsToHumanTime(nSec):
   """
   Render a duration in seconds as a rough human-readable string,
   e.g. '1 minute', '1.5 hours', '3 weeks'.
   """
   secs = float(nSec)

   # (upper bound, divisor, unit name): pick the first unit whose upper
   # bound (0.9x the next-larger unit) the duration stays under
   unitTable = [ (MINUTE, 1.0,    'second'),
                 (HOUR,   MINUTE, 'minute'),
                 (DAY,    HOUR,   'hour'  ),
                 (WEEK,   DAY,    'day'   ),
                 (MONTH,  WEEK,   'week'  ),
                 (YEAR,   MONTH,  'month' ) ]

   amt, unit = secs/YEAR, 'year'
   for upperBound, divisor, name in unitTable:
      if secs < 0.9*upperBound:
         amt, unit = secs/divisor, name
         break

   # Round to '1 <unit>', '1.5 <unit>s', or 'N <unit>s'
   if amt < 1.25:
      return '1 ' + unit
   elif amt <= 1.75:
      return '1.5 ' + unit + 's'
   else:
      return '%d %ss' % (int(amt+0.5), unit)
+
# NOTE(review): this region of the file was garbled/truncated ("if nBytes0:"
# fused the start of bytesToHumanSize onto the tail of verifyChecksum).  The
# functions below are reconstructed so that the visible callers work again:
# makeSixteenBytesEasy() needs computeChecksum(), readSixteenEasyBytes()
# needs verifyChecksum(), which in turn needs fixChecksumError().
def bytesToHumanSize(nBytes):
   """ Convert a byte count to a human-readable size string, e.g. '4.2 MB'. """
   if nBytes < KILOBYTE:
      return '%d bytes' % nBytes
   elif nBytes < MEGABYTE:
      return '%0.1f kB' % (nBytes/KILOBYTE)
   elif nBytes < GIGABYTE:
      return '%0.1f MB' % (nBytes/MEGABYTE)
   elif nBytes < TERABYTE:
      return '%0.1f GB' % (nBytes/GIGABYTE)
   elif nBytes < PETABYTE:
      return '%0.1f TB' % (nBytes/TERABYTE)
   else:
      return '%0.1f PB' % (nBytes/PETABYTE)


################################################################################
def computeChecksum(binaryStr, nBytes=4, hashFunc=hash256):
   """ Return the first nBytes of hashFunc(binaryStr) (default: hash256). """
   return hashFunc(binaryStr)[:nBytes]


################################################################################
def fixChecksumError(binaryStr, chksum, hashFunc=hash256):
   """
   Brute-force a single-byte error in binaryStr: try every value in every
   byte position until the checksum matches.  Returns '' if unfixable.
   """
   for byte in range(len(binaryStr)):
      binaryArray = [binaryStr[i] for i in range(len(binaryStr))]
      for val in range(256):
         binaryArray[byte] = chr(val)
         if hashFunc(''.join(binaryArray)).startswith(chksum):
            return ''.join(binaryArray)
   return ''


################################################################################
def verifyChecksum(binaryStr, chksum, hashFunc=hash256, fixIfNecessary=True, \
                                                            beQuiet=False):
   """
   Verify that chksum is a prefix of hashFunc(binaryStr).  On mismatch,
   optionally attempt to repair a one-byte error in the data, or detect a
   one-byte error in the checksum itself.  Returns the (possibly fixed)
   data string, or '' if the data cannot be verified or repaired.
   """
   bin1 = str(binaryStr)
   bin2 = binary_switchEndian(binaryStr)

   if hashFunc(bin1).startswith(chksum):
      return bin1
   elif hashFunc(bin2).startswith(chksum):
      # Data matches with reversed endianness; warn and optionally accept it
      if not beQuiet: LOGWARN('Checksum valid for input with reversed endianness')
      if fixIfNecessary:
         return bin2

   if fixIfNecessary:
      if not beQuiet: LOGWARN('***Checksum error!  Attempting to fix...')
      fixStr = fixChecksumError(bin1, chksum, hashFunc)
      if len(fixStr)>0:
         if not beQuiet: LOGWARN('fixed!')
         return fixStr
      else:
         # ONE LAST CHECK SPECIFIC TO MY SERIALIZATION SCHEME:
         # If the string was originally all zeros, chksum is hash256('')
         # ...which is a known value, and frequently used in my files
         if chksum==hex_to_binary('5df6e0e2'):
            if not beQuiet: LOGWARN('fixed!')
            return ''


   # ID a checksum byte error...
   origHash = hashFunc(bin1)
   for i in range(len(chksum)):
      chkArray = [chksum[j] for j in range(len(chksum))]
      for ch in range(256):
         chkArray[i] = chr(ch)
         if origHash.startswith(''.join(chkArray)):
            LOGWARN('***Checksum error!  Incorrect byte in checksum!')
            return bin1

   LOGWARN('Checksum fix failed')
   return ''
+

# Taken directly from rpc.cpp in reference bitcoin client, 0.3.24
def binaryBits_to_difficulty(b):
   """ Converts the 4-byte binary difficulty string to a float """
   bits = binary_to_int(b)
   exponent = (bits >> 24) & 0xff
   diff = float(0x0000ffff) / float(bits & 0x00ffffff)
   # Normalize the exponent to 29 one step at a time (identical float ops
   # to the reference implementation)
   while exponent != 29:
      if exponent < 29:
         diff *= 256.0
         exponent += 1
      else:
         diff /= 256.0
         exponent -= 1
   return diff
+
+
# TODO: I don't actually know how to do this, yet...
def difficulty_to_binaryBits(i):
   """ Inverse of binaryBits_to_difficulty -- unimplemented stub, returns None. """
   pass
+
################################################################################
def CreateQRMatrix(dataToEncode, errLevel='L'):
   """
   Build a QR-code bit matrix for the given data.

   errLevel selects the error-correction level; it is resolved via
   getattr on QRErrorCorrectLevel (provided by the QR library imported
   elsewhere in this file -- presumably accepts 'L','M','Q','H').

   Returns [matrix, moduleCount] where matrix is a list of rows of 0/1.
   NOTE(review): the failure path returns the *tuple* ([[0]], 1) while
   success returns a *list*; callers unpacking two values work either way.
   """
   sz=3
   success=False
   qrmtrx = [[]]
   # Try increasing QR sizes (versions) until the payload fits; the library
   # raises TypeError when the data is too large for the current size
   while sz<20:
      try:
         errCorrectEnum = getattr(QRErrorCorrectLevel, errLevel.upper())
         qr = QRCode(sz, errCorrectEnum)
         qr.addData(dataToEncode)
         qr.make()
         success=True
         break
      except TypeError:
         sz += 1

   if not success:
      LOGERROR('Unsuccessful attempt to create QR code')
      LOGERROR('Data to encode: (Length: %s, isAscii: %s)', \
                len(dataToEncode), isASCII(dataToEncode))
      return [[0]], 1

   qrmtrx = []
   modCt = qr.getModuleCount()
   for r in range(modCt):
      tempList = [0]*modCt
      for c in range(modCt):
         # The matrix is transposed by default, from what we normally expect
         tempList[c] = 1 if qr.isDark(c,r) else 0
      qrmtrx.append(tempList)

   return [qrmtrx, modCt]
+
+
# The following params are for the Bitcoin elliptic curves (secp256k1)
# Curve equation y^2 = x^3 + A*x + B over GF(MOD); (GX, GY) is the base
# point generating a cyclic group of size ORDER (per the SEC2 standard)
SECP256K1_MOD = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2FL
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141L
SECP256K1_B = 0x0000000000000000000000000000000000000000000000000000000000000007L
SECP256K1_A = 0x0000000000000000000000000000000000000000000000000000000000000000L
SECP256K1_GX = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798L
SECP256K1_GY = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8L
+
+################################################################################
+################################################################################
+# START FINITE FIELD OPERATIONS
+
class FiniteField(object):
   """
   Create a simple, prime-order FiniteField.  Because this is used only
   to encode data of fixed width, I enforce prime-order by hardcoding
   primes, and you just pick the data width (in bytes).  If your desired
   data width is not here, simply find a prime number very close to 2^N,
   and add it to the PRIMES map below.

   This will be used for Shamir's Secret Sharing scheme.  Encode your
   data as the coeffient of finite-field polynomial, and store points
   on that polynomial.  The order of the polynomial determines how
   many points are needed to recover the original secret.
   """

   # Largest prime just under 2**(8*nbytes), keyed by data width in bytes
   PRIMES = { 1: 2**8-5,  # mainly for testing
              2: 2**16-39,
              4: 2**32-5,
              8: 2**64-59,
             16: 2**128-797,
             20: 2**160-543,
             24: 2**192-333,
             32: 2**256-357,
             48: 2**384-317,
             64: 2**512-569,
             96: 2**768-825,
            128: 2**1024-105,
            192: 2**1536-3453,
            256: 2**2048-1157 }

   def __init__(self, nbytes):
      """ Select the hardcoded prime for nbytes; raise FiniteFieldError if none. """
      if nbytes not in self.PRIMES:
         LOGERROR('No primes available for size=%d bytes', nbytes)
         self.prime = None
         raise FiniteFieldError
      self.prime = self.PRIMES[nbytes]


   def add(self,a,b):
      return (a+b) % self.prime

   def subtract(self,a,b):
      return (a-b) % self.prime

   def mult(self,a,b):
      return (a*b) % self.prime

   def power(self,a,b):
      """ Modular exponentiation a**b mod prime, by repeated squaring. """
      result = 1
      while(b>0):
         b,x = divmod(b,2)
         result = (result * (a if x else 1)) % self.prime
         a = a*a % self.prime
      return result

   def powinv(self,a):
      """ USE ONLY PRIME MODULUS (Fermat: a**(p-2) == a**-1 mod p) """
      return self.power(a,self.prime-2)

   def divide(self,a,b):
      """ USE ONLY PRIME MODULUS """
      baddinv = self.powinv(b)
      return self.mult(a,baddinv)

   def mtrxrmrowcol(self,mtrx,r,c):
      """ Copy of the square matrix with row r and column c removed. """
      if not len(mtrx) == len(mtrx[0]):
         LOGERROR('Must be a square matrix!')
         return []
      sz = len(mtrx)
      return [[mtrx[i][j] for j in range(sz) if not j==c] for i in range(sz) if not i==r]


   ################################################################################
   def mtrxdet(self,mtrx):
      """ Determinant mod prime, by cofactor expansion along the first row. """
      if len(mtrx)==1:
         return mtrx[0][0]

      if not len(mtrx) == len(mtrx[0]):
         LOGERROR('Must be a square matrix!')
         return -1

      result = 0;
      for i in range(len(mtrx)):
         mult = mtrx[0][i] * (-1 if i%2==1 else 1)
         subdet = self.mtrxdet(self.mtrxrmrowcol(mtrx,0,i))
         result = self.add(result, self.mult(mult,subdet))
      return result

   ################################################################################
   def mtrxmultvect(self,mtrx, vect):
      """ Matrix-vector product mod prime.  Logs (but does not abort) on mismatch. """
      M,N = len(mtrx), len(mtrx[0])
      if not len(mtrx[0])==len(vect):
         LOGERROR('Mtrx and vect are incompatible: %dx%d, %dx1', M, N, len(vect))
      return [ sum([self.mult(mtrx[i][j],vect[j]) for j in range(N)])%self.prime for i in range(M) ]

   ################################################################################
   def mtrxmult(self,m1, m2):
      """ Matrix-matrix product mod prime: (M1xN1)*(M2xN2) -> M1xN2. """
      M1,N1 = len(m1), len(m1[0])
      M2,N2 = len(m2), len(m2[0])
      if not N1==M2:
         LOGERROR('Mtrx and vect are incompatible: %dx%d, %dx%d', M1,N1, M2,N2)
      inner = lambda i,j: sum([self.mult(m1[i][k],m2[k][j]) for k in range(N1)])
      # BUGFIX: the result has N2 columns (was range(N1), which produced
      # wrong-width rows / IndexError for non-square products; all in-file
      # uses are square, so their results are unchanged)
      return [ [inner(i,j)%self.prime for j in range(N2)] for i in range(M1) ]

   ################################################################################
   def mtrxadjoint(self,mtrx):
      """ Adjugate (transpose of cofactor matrix) mod prime. """
      sz = len(mtrx)
      inner = lambda i,j: self.mtrxdet(self.mtrxrmrowcol(mtrx,i,j))
      return [[((-1 if (i+j)%2==1 else 1)*inner(j,i))%self.prime for j in range(sz)] for i in range(sz)]

   ################################################################################
   def mtrxinv(self,mtrx):
      """ Inverse of a square matrix mod prime: adj(M) / det(M). """
      det = self.mtrxdet(mtrx)
      adj = self.mtrxadjoint(mtrx)
      sz = len(mtrx)
      return [[self.divide(adj[i][j],det) for j in range(sz)] for i in range(sz)]
+
+
################################################################################
def SplitSecret(secret, needed, pieces, nbytes=None, use_random_x=False):
   """
   Shamir's Secret Sharing: split `secret` into `pieces` fragments, any
   `needed` (2..8) of which can reconstruct it.

   Returns a list of [xBin, yBin] pairs, each big-endian and nbytes wide.
   The polynomial coefficients are derived deterministically from the
   secret via chained HMAC512, so splitting the same secret twice yields
   the same fragments.  Raises FiniteFieldError on bad parameters or if
   the secret does not fit below the field prime.
   """
   if not isinstance(secret, basestring):
      secret = secret.toBinStr()

   if nbytes==None:
      nbytes = len(secret)

   ff = FiniteField(nbytes)
   fragments = []

   # Convert secret to an integer -- the leading polynomial coefficient
   a = binary_to_int(SecureBinaryData(secret).toBinStr(),BIGENDIAN)

   # NOTE(review): the original text here was garbled ("if not a=needed:");
   # restored the two sanity checks that were merged together.
   if not a < ff.prime:
      LOGERROR('Secret too large for the %d-byte finite field', nbytes)
      raise FiniteFieldError

   if not pieces >= needed:
      LOGERROR('You must create more pieces than needed to reconstruct!')
      raise FiniteFieldError

   if needed==1 or needed>8:
      LOGERROR('Can split secrets into parts *requiring* at most 8 fragments')
      LOGERROR('You can break it into as many optional fragments as you want')
      raise FiniteFieldError


   # We deterministically produce the coefficients so that we always use the
   # same polynomial for a given secret
   lasthmac = secret[:]
   othernum = []
   for i in range(pieces+needed-1):
      lasthmac = HMAC512(lasthmac, 'splitsecrets')[:nbytes]
      othernum.append(binary_to_int(lasthmac))

   def poly(x):
      # Evaluate a*x^(needed-1) + sum_i othernum[i]*x^e over the field
      polyout = ff.mult(a, ff.power(x,needed-1))
      for i,e in enumerate(range(needed-2,-1,-1)):
         term = ff.mult(othernum[i], ff.power(x,e))
         polyout = ff.add(polyout, term)
      return polyout

   for i in range(pieces):
      x = othernum[i+2] if use_random_x else i+1
      fragments.append( [x, poly(x)] )

   # Drop local references to the secret before returning
   secret,a = None,None
   fragments = [ [int_to_binary(p, nbytes, BIGENDIAN) for p in frag] for frag in fragments]
   return fragments
+
+
################################################################################
def ReconstructSecret(fragments, needed, nbytes):
   """
   Recover the secret from `needed` fragments by solving the Vandermonde
   system over the finite field.  Each fragment is [xBin, yBin], big-endian
   and nbytes wide; returns the leading coefficient re-serialized the same way.
   """
   ff = FiniteField(nbytes)
   vandermonde = []
   yvals = []
   for xBin, yBin in fragments[:needed]:
      x = binary_to_int(xBin, BIGENDIAN)
      y = binary_to_int(yBin, BIGENDIAN)
      vandermonde.append([ff.power(x, e) for e in range(needed-1, -1, -1)])
      yvals.append(y)

   coeffs = ff.mtrxmultvect(ff.mtrxinv(vandermonde), yvals)
   return int_to_binary(coeffs[0], nbytes, BIGENDIAN)
+
+
################################################################################
def createTestingSubsets( fragIndices, M, maxTestCount=20):
   """
   Choose which M-sized subsets of fragIndices to test-reconstruct.

   Returns (IsRandomized, listOfTuplesOfSizeM).  If the number of
   M-combinations is <= maxTestCount, all of them are returned
   (IsRandomized=False); otherwise maxTestCount distinct random samples
   are returned (IsRandomized=True).  Raises KeyDataError if fewer than
   M fragments are supplied.
   """
   numIdx = len(fragIndices)

   if M>numIdx:
      LOGERROR('Insufficent number of fragments')
      raise KeyDataError
   elif M==numIdx:
      LOGINFO('Fragments supplied == needed. One subset to test (%s-of-N)' % M)
      return ( False, [tuple(fragIndices)] )
   else:
      LOGINFO('Test reconstruct %s-of-N, with %s fragments' % (M, numIdx))
      subs = []

      # Compute the number of possible subsets. This is stable because we
      # shouldn't ever have more than 12 fragments
      fact = math.factorial
      numCombo = fact(numIdx) / ( fact(M) * fact(numIdx-M) )

      if numCombo <= maxTestCount:
         LOGINFO('Testing all %s combinations...' % numCombo)
         # Enumerate all bitmasks of numIdx bits and keep those with exactly
         # M ones; int_to_bitset's bit ordering determines which index each
         # bit selects (defined elsewhere in this file)
         for x in xrange(2**numIdx):
            bits = int_to_bitset(x)
            if not bits.count('1') == M:
               continue

            subs.append(tuple([fragIndices[i] for i,b in enumerate(bits) if b=='1']))

         return (False, sorted(subs))
      else:
         LOGINFO('#Subsets > %s, will need to randomize' % maxTestCount)
         # Random sampling without repeating a subset
         usedSubsets = set()
         while len(subs) < maxTestCount:
            sample = tuple(sorted(random.sample(fragIndices, M)))
            if not sample in usedSubsets:
               usedSubsets.add(sample)
               subs.append(sample)

         return (True, sorted(subs))
+
+
################################################################################
def testReconstructSecrets(fragMap, M, maxTestCount=20):
   """
   Reconstruct the secret from M-sized subsets of the supplied fragments
   (all X-choose-M subsets, or a random sample if there are more than
   maxTestCount).  Returns (wasRandomSampling, [(subset, secret), ...]).
   """
   fragKeys = list(fragMap.iterkeys())
   isRandom, subsets = createTestingSubsets(fragKeys, M, maxTestCount)
   nBytes = len(fragMap[fragKeys[0]][1])
   LOGINFO('Testing %d-byte fragments' % nBytes)

   results = []
   for subset in subsets:
      frags = [fragMap[idx][:] for idx in subset]
      results.append((subset, ReconstructSecret(frags, M, nBytes)))

   return isRandom, results
+
+
################################################################################
def ComputeFragIDBase58(M, wltIDBin):
   """ Fragment-set ID: str(M) + Base58 of 4-byte hash256(wltID || M). """
   mBin = int_to_binary(M, widthBytes=4, endOut=BIGENDIAN)
   idBin = hash256(wltIDBin + mBin)[:4]
   return str(M) + binary_to_base58(idBin)
+
################################################################################
def ComputeFragIDLineHex(M, index, wltIDBin, isSecure=False, addSpaces=False):
   """
   Hex fragment-ID line: [M, +128 if secure][index+1][wallet-ID hex].
   With addSpaces, the first 16 hex chars are grouped into 4-char quads.
   """
   mByte = (128+M) if isSecure else M
   fragID = int_to_hex(mByte) + int_to_hex(index+1) + binary_to_hex(wltIDBin)

   if addSpaces:
      fragID = ' '.join([fragID[4*i:4*i+4] for i in range(4)])

   return fragID
+
+
################################################################################
def ReadFragIDLineBin(binLine):
   """ Parse a binary frag-ID line -> (M, fragNum, wltID, isSecure, idBase58). """
   firstByte = binary_to_int(binLine[0])
   doMask = firstByte > 127        # high bit flags a "secure" fragment
   M = firstByte & 0x7f
   fnum = binary_to_int(binLine[1])
   wltID = binLine[2:]

   idBase58 = '%s-#%s' % (ComputeFragIDBase58(M, wltID), fnum)
   return (M, fnum, wltID, doMask, idBase58)
+
+
################################################################################
def ReadFragIDLineHex(hexLine):
   """ Hex wrapper: strip whitespace, decode, delegate to ReadFragIDLineBin. """
   cleaned = hexLine.strip().replace(' ', '')
   return ReadFragIDLineBin(hex_to_binary(cleaned))
+
+# END FINITE FIELD OPERATIONS
+################################################################################
+################################################################################
+
+
+
+
+
################################################################################
def checkAddrType(addrBin):
   """ Gets the network byte of the address. Returns -1 if chksum fails """
   payload, chk = addrBin[:-4], addrBin[-4:]
   if hash256(payload)[:4] == chk:
      return addrBin[0]
   return -1
+
################################################################################
def checkAddrBinValid(addrBin, validPrefixes=None):
   """
   Check the binary address has a valid checksum and one of the allowed
   network prefix bytes (default: standard pubkey-hash and P2SH).
   A single non-list prefix value is accepted as well.
   """
   if validPrefixes is None:
      prefixes = [ADDRBYTE, P2SHBYTE]
   elif isinstance(validPrefixes, list):
      prefixes = validPrefixes
   else:
      prefixes = [validPrefixes]

   return checkAddrType(addrBin) in prefixes
+
+
+
################################################################################
def checkAddrStrValid(addrStr):
   """ Check that a Base58 address-string is valid on this network """
   addrBin = base58_to_binary(addrStr)
   return checkAddrBinValid(addrBin)
+
+
################################################################################
def convertKeyDataToAddress(privKey=None, pubKey=None):
   """ Returns a hash160 value """
   # Accepts either a private or a public key, as a raw str or a
   # SecureBinaryData object; a private key (must be 32 bytes) takes
   # precedence and is converted to its public key first
   if not privKey and not pubKey:
      raise BadAddressError('No key data supplied for conversion')
   elif privKey:
      if isinstance(privKey, str):
         privKey = SecureBinaryData(privKey)

      if not privKey.getSize()==32:
         raise BadAddressError('Invalid private key format!')
      else:
         pubKey = CryptoECDSA().ComputePublicKey(privKey)

   if isinstance(pubKey,str):
      pubKey = SecureBinaryData(pubKey)
   # hash160 = RIPEMD160(SHA256(pubkey)), computed by the crypto wrapper
   return pubKey.getHash160()
+
+
+
################################################################################
def decodeMiniPrivateKey(keyStr):
   """
   Converts a 22, 26 or 30-character Base58 mini private key into a
   32-byte binary private key.  Returns '' on unexpected length; raises
   KeyDataError on a failed typo-check or unsupported variant.
   """
   if len(keyStr) not in (22, 26, 30):
      return ''

   # Typo check: sha256(key + '?') must start with 0x00 (0x01 marks the
   # unsupported PBKDF2 variant)
   checkByte = sha256(keyStr + '?')[0]
   if binary_to_hex(checkByte) == '01':
      raise KeyDataError('PBKDF2-based mini private keys not supported!')
   elif binary_to_hex(checkByte) != '00':
      raise KeyDataError('Invalid mini private key... double check the entry')

   # The actual private key is simply the hash of the mini-key string
   return sha256(keyStr)
+
+
################################################################################
def parsePrivateKeyData(theStr):
   """
   Classify and decode a user-entered private key string.

   Accepts mini private keys (22/30 chars), plain Base58 (with or without
   0x80 prefix and/or 4-byte checksum), or plain hex.  Returns
   (binaryKey, keyTypeDescription).  Raises BadAddressError on
   unrecognized input, InvalidHashError on checksum failure, and
   CompressedKeyError for compressed-key encodings.
   """
   hexChars = '0123456789abcdef'   # fixed: original literal had a duplicated '0'
   b58Chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

   hexCount = sum([1 if c in hexChars else 0 for c in theStr.lower()])
   b58Count = sum([1 if c in b58Chars else 0 for c in theStr])
   canBeHex = hexCount==len(theStr)
   canBeB58 = b58Count==len(theStr)

   binEntry = ''
   keyType = ''
   isMini = False
   if canBeB58 and not canBeHex:
      if len(theStr) in (22, 30):
         # Mini-private key format!
         try:
            binEntry = decodeMiniPrivateKey(theStr)
         except KeyDataError:
            raise BadAddressError('Invalid mini-private key string')
         keyType = 'Mini Private Key Format'
         isMini = True
      elif len(theStr) in range(48,53):
         binEntry = base58_to_binary(theStr)
         keyType = 'Plain Base58'
      else:
         raise BadAddressError('Unrecognized key data')
   elif canBeHex:
      binEntry = hex_to_binary(theStr)
      keyType = 'Plain Hex'
   else:
      raise BadAddressError('Unrecognized key data')


   if len(binEntry)==36 or (len(binEntry)==37 and binEntry[0]==PRIVKEYBYTE):
      if len(binEntry)==36:
         # 32-byte key followed by a 4-byte checksum
         keydata = binEntry[:32 ]
         chk = binEntry[ 32:]
         binEntry = verifyChecksum(keydata, chk)
         if not isMini:
            keyType = 'Raw %s with checksum' % keyType.split(' ')[1]
      else:
         # Assume leading 0x80 byte, and 4 byte checksum
         keydata = binEntry[ :1+32 ]
         chk = binEntry[ 1+32:]
         binEntry = verifyChecksum(keydata, chk)
         binEntry = binEntry[1:]
         if not isMini:
            keyType = 'Standard %s key with checksum' % keyType.split(' ')[1]

   if binEntry=='':
      raise InvalidHashError('Private Key checksum failed!')
   elif len(binEntry) in (33, 37) and binEntry[-1]=='\x01':
      raise CompressedKeyError('Compressed Public keys not supported!')
   return binEntry, keyType
+
+
+
################################################################################
def encodePrivKeyBase58(privKeyBin):
   """ Serialize a private key as Base58: prefix byte + key + 4-byte checksum. """
   prefixed = PRIVKEYBYTE + privKeyBin
   return binary_to_base58(prefixed + computeChecksum(prefixed))
+
+
+
# Version tag for Armory's URI handling (internal marker; apparently not
# part of the bitcoin: URI scheme itself -- confirm against callers)
URI_VERSION_STR = '1.0'
+
################################################################################
def parseBitcoinURI(theStr):
   """
   Takes a 'bitcoin:' URI string and returns its pieces in a dictionary.

   Returns {} if the string is not a bitcoin URI or is malformed.
   Keys: 'address' plus any key=value query fields; 'amount' is converted
   via str2coin, and 'label'/'message' values are percent-decoded.
   """
   # Start by splitting it into pieces on any separator
   seplist = ':;?&'
   for c in seplist:
      theStr = theStr.replace(c,' ')
   parts = theStr.split()

   # Must be non-empty and use the bitcoin scheme
   # (fix: empty input previously raised an uncaught IndexError)
   if len(parts)==0 or not parts[0] == 'bitcoin':
      return {}

   uriData = {}

   try:
      uriData['address'] = parts[1]
      for p in parts[2:]:
         if not '=' in p:
            raise BadURIError('Unrecognized URI field: "%s"'%p)

         # Split only on the first '=' so values containing '=' survive
         # (fix: previously any extra '=' aborted the whole parse)
         key, value = p.split('=', 1)

         # A few keys get special treatment; everything else passes through
         if key.lower()=='amount':
            uriData['amount'] = str2coin(value)
         elif key.lower() in ('label','message'):
            uriData[key] = uriPercentToReserved(value)
         else:
            uriData[key] = value
   except Exception:
      # Malformed URI -> empty dict (fix: was a bare except, which also
      # swallowed KeyboardInterrupt/SystemExit)
      return {}

   return uriData
+
+
################################################################################
def uriReservedToPercent(theStr):
   """
   Convert from a regular string to a percent-encoded string
   """
   # '%' must be handled first, or already-inserted escapes would be
   # re-escaped on later passes
   reserved = "%!*'();:@&=+$,/?#[] "

   for ch in reserved:
      theStr = theStr.replace(ch, '%' + int_to_hex(ord(ch)))
   return theStr
+
+
################################################################################
def uriPercentToReserved(theStr):
   """
   Decode percent-escapes ('%xx') back to their literal characters.
   (This direction is much easier than encoding!)
   """
   segments = theStr.split('%')
   decoded = segments[0]
   for seg in segments[1:]:
      # First two chars after each '%' are the hex code; rest is literal
      decoded += chr(hex_to_int(seg[:2])) + seg[2:]
   return decoded
+
+
################################################################################
def createBitcoinURI(addr, amt=None, msg=None):
   """
   Build a 'bitcoin:' URI with optional amount and percent-encoded label.
   """
   query = []
   if amt:
      query.append('amount=%s' % coin2str(amt, maxZeros=0).strip())
   if msg:
      query.append('label=%s' % uriReservedToPercent(msg))

   uriStr = 'bitcoin:%s' % addr
   if query:
      uriStr += '?' + '&'.join(query)
   return uriStr
+
+
################################################################################
def createSigScriptFromRS(rBin, sBin):
   """
   DER-encode an ECDSA (r, s) pair: strip leading zero bytes, re-add one
   zero byte where the MSB is set (keeps the integers positive), then wrap
   as 0x30 [totalLen] 0x02 [rLen] r 0x02 [sLen] s.
   """
   rBin = rBin.lstrip('\x00')
   sBin = sBin.lstrip('\x00')

   if binary_to_int(rBin[0]) & 128 > 0:
      rBin = '\x00' + rBin
   if binary_to_int(sBin[0]) & 128 > 0:
      sBin = '\x00' + sBin

   rLen = int_to_binary(len(rBin))
   sLen = int_to_binary(len(sBin))
   totalLen = int_to_binary(len(rBin) + len(sBin) + 4)   # +4 for the two 0x02+len headers
   return '\x30' + totalLen + \
          '\x02' + rLen + rBin + \
          '\x02' + sLen + sBin
+
+
+
+
+
################################################################################
class PyBackgroundThread(threading.Thread):
   """
   Wraps a function in a threading.Thread object which will run
   that function in a separate thread.  Calling self.start() will
   return immediately, but will start running that function in
   separate thread.  You can check its progress later by using
   self.isRunning() or self.isFinished().  If the function returns
   a value, use self.getOutput().  Use self.getElapsedSeconds()
   to find out how long it took.
   """

   def __init__(self, *args, **kwargs):
      """
      Optional args: (func, *funcArgs, **funcKwargs).  With no args the
      thread runs a no-op until setThreadFunction() is called.
      """
      threading.Thread.__init__(self)

      # UNINITIALIZED timestamps distinguish "never started"/"never finished"
      self.output = None
      self.startedAt = UNINITIALIZED
      self.finishedAt = UNINITIALIZED
      self.errorThrown = None
      # Optional callable invoked (in the worker thread) after func completes
      self.passAsync = None
      self.setDaemon(True)

      if len(args)==0:
         self.func = lambda: ()
      else:
         if not hasattr(args[0], '__call__'):
            raise TypeError('PyBkgdThread ctor arg1 must be a function')
         else:
            self.setThreadFunction(args[0], *args[1:], **kwargs)

   def setThreadFunction(self, thefunc, *args, **kwargs):
      """ Bind thefunc(*args, **kwargs) as the work to run in the thread. """
      def funcPartial():
         return thefunc(*args, **kwargs)
      self.func = funcPartial

   def setDaemon(self, yesno):
      """ Set the daemon flag; logged error (no-op) if already started. """
      if self.isStarted():
         LOGERROR('Must set daemon property before starting thread')
      else:
         super(PyBackgroundThread, self).setDaemon(yesno)

   def isFinished(self):
      """ True once run() has completed (successfully or with an error). """
      return not (self.finishedAt==UNINITIALIZED)

   def isStarted(self):
      """ True once start() has been called. """
      return not (self.startedAt==UNINITIALIZED)

   def isRunning(self):
      """ True while started but not yet finished. """
      return (self.isStarted() and not self.isFinished())

   def getElapsedSeconds(self):
      """ Wall-clock run duration; None (plus error log) if not finished. """
      if not self.isFinished():
         LOGERROR('Thread is not finished yet!')
         return None
      else:
         return self.finishedAt - self.startedAt

   def getOutput(self):
      """ Return value of the wrapped function; None if not finished yet. """
      if not self.isFinished():
         if self.isRunning():
            LOGERROR('Cannot get output while thread is running')
         else:
            LOGERROR('Thread was never .start()ed')
         return None

      return self.output

   def didThrowError(self):
      """ True if the wrapped function raised an exception. """
      return (self.errorThrown is not None)

   def raiseLastError(self):
      """ Re-raise the worker's exception in the caller's thread, if any. """
      if self.errorThrown is None:
         return
      raise self.errorThrown

   def getErrorType(self):
      """ Type of the worker's exception, or None. """
      if self.errorThrown is None:
         return None
      return type(self.errorThrown)

   def getErrorMsg(self):
      """ First argument of the worker's exception, or ''. """
      if self.errorThrown is None:
         return ''
      return self.errorThrown.args[0]


   def start(self):
      # The prefunc is blocking.  Probably preparing something
      # that needs to be in place before we start the thread
      self.startedAt = RightNow()
      super(PyBackgroundThread, self).start()

   def run(self):
      # This should not be called manually.  Only call start()
      try:
         self.output = self.func()
      except Exception as e:
         LOGEXCEPT('Error in pybkgdthread: %s', str(e))
         self.errorThrown = e
      self.finishedAt = RightNow()

      # If a completion callback was attached, invoke it (still in this thread)
      if not self.passAsync: return
      if hasattr(self.passAsync, '__call__'):
         self.passAsync()

   def reset(self):
      """ Clear all run state so the function can be run again. """
      self.output = None
      self.startedAt = UNINITIALIZED
      self.finishedAt = UNINITIALIZED
      self.errorThrown = None

   def restart(self):
      """ Reset state and start again. """
      self.reset()
      self.start()
+
+
# Define a decorator that allows the function to be called asynchronously
def AllowAsync(func):
   """
   Decorator: call the wrapped function with async=<truthy> to run it in a
   PyBackgroundThread (the thread object is returned immediately); omit it
   or pass async=False to run synchronously.  If the 'async' value is
   itself callable, the worker thread invokes it after the function ends.
   NOTE(review): 'async' became a reserved word in Python 3.7+, so callers
   using literal async=... keyword syntax would need a **kwargs dict there.
   """
   def wrappedFunc(*args, **kwargs):
      if not 'async' in kwargs or kwargs['async']==False:
         # Run the function normally
         if 'async' in kwargs:
            del kwargs['async']
         return func(*args, **kwargs)
      else:
         # Run the function as a background thread
         passAsync = kwargs['async']
         del kwargs['async']

         thr = PyBackgroundThread(func, *args, **kwargs)
         thr.passAsync = passAsync
         thr.start()
         return thr

   return wrappedFunc
+
+
def emptyFunc(*args, **kwargs):
   """ No-op placeholder: accepts and ignores any arguments, returns None. """
   return None
+
+
def EstimateCumulativeBlockchainSize(blkNum):
   """
   Rough cumulative blockchain size in bytes at a given block height,
   based on a hardcoded table of historical (height, bytes) samples:
   linear interpolation inside the table, linear extrapolation (using the
   slope across the last few samples) beyond it.
   """
   # Table of historical samples; parsed on every call, which is cheap
   # enough for how rarely this is used
   blksizefile = """
         0               285
         20160           4496226
         40320           9329049
         60480           16637208
         80640           31572990
         82656           33260320
         84672           35330575
         86688           36815335
         88704           38386205
         100800          60605119
         102816          64795352
         104832          68697265
         108864          79339447
         112896          92608525
         116928          116560952
         120960          140607929
         124992          170059586
         129024          217718109
         133056          303977266
         137088          405836779
         141120          500934468
         145152          593217668
         149184          673064617
         153216          745173386
         157248          816675650
         161280          886105443
         165312          970660768
         169344          1058290613
         173376          1140721593
         177408          1240616018
         179424          1306862029
         181440          1463634913
         183456          1639027360
         185472          1868851317
         187488          2019397056
         189504          2173291204
         191520          2352873908
         193536          2530862533
         195552          2744361593
         197568          2936684028
         199584          3115432617
         201600          3282437367
         203616          3490737816
         205632          3669806064
         207648          3848901149
         209664          4064972247
         211680          4278148686
         213696          4557787597
         215712          4786120879
         217728          5111707340
         219744          5419128115
         221760          5733907456
         223776          6053668460
         225792          6407870776
         227808          6652067986
         228534          6778529822
         257568          10838081536
         259542          11106516992
         271827          12968787968
         286296          15619588096
         290715          16626221056
   """
   rows = [line.strip().split() for line in blksizefile.strip().split('\n')]
   samples = [[int(h), int(sz)] for h, sz in rows]

   lastBlk, lastSize = samples[-1]
   if blkNum < lastBlk:
      # Interpolate between the two samples bracketing blkNum
      for i, (b1, d1) in enumerate(samples):
         if blkNum < b1:
            b0, d0 = samples[i-1]
            frac = float(blkNum - b0) / float(b1 - b0)
            return int(frac*d1 + (1-frac)*d0)
      raise ValueError('Interpolation failed for %d' % blkNum)
   else:
      # Extrapolate using the bytes-per-block rate over the last few samples
      prevBlk, prevSize = samples[-3]
      bytesPerBlock = float(lastSize - prevSize) / float(lastBlk - prevBlk)
      return lastSize + (blkNum - lastBlk) * bytesPerBlock
+
+
+
#############################################################################
def DeriveChaincodeFromRootKey(sbdPrivKey):
   """
   Deterministically derive a wallet chaincode from the root private key:
   HMAC256 of the key's hash256 with a fixed ASCII salt string.
   """
   return SecureBinaryData( HMAC256( sbdPrivKey.getHash256(), \
                              'Derive Chaincode from Root Key'))
+
+
################################################################################
def HardcodedKeyMaskParams():
   """
   Build the fixed parameter map used for key masking (presumably the
   paper-backup "SecurePrint"-style protection -- confirm with callers):
   a hardcoded AES IV and KDF salt derived from digits of pi and e, a
   16 MB single-iteration ROMix KDF, and closures for masking, unmasking,
   KDF application, and passphrase creation/checking.
   """
   paramMap = {}

   # Nothing up my sleeve! Need some hardcoded random numbers to use for
   # encryption IV and salt. Using the first 256 digits of Pi for the
   # the IV, and first 256 digits of e for the salt (hashed)
   digits_pi = ( \
      'ARMORY_ENCRYPTION_INITIALIZATION_VECTOR_'
      '1415926535897932384626433832795028841971693993751058209749445923'
      '0781640628620899862803482534211706798214808651328230664709384460'
      '9550582231725359408128481117450284102701938521105559644622948954'
      '9303819644288109756659334461284756482337867831652712019091456485')
   digits_e = ( \
      'ARMORY_KEY_DERIVATION_FUNCTION_SALT_'
      '7182818284590452353602874713526624977572470936999595749669676277'
      '2407663035354759457138217852516642742746639193200305992181741359'
      '6629043572900334295260595630738132328627943490763233829880753195'
      '2510190115738341879307021540891499348841675092447614606680822648')

   paramMap['IV'] = SecureBinaryData( hash256(digits_pi)[:16] )
   paramMap['SALT'] = SecureBinaryData( hash256(digits_e) )
   paramMap['KDFBYTES'] = long(16*MEGABYTE)

   def hardcodeCreateSecurePrintPassphrase(secret):
      """ Derive the Base58 passphrase: 7 HMAC bytes + 1 check byte. """
      if isinstance(secret, basestring):
         secret = SecureBinaryData(secret)
      bin7 = HMAC512(secret.getHash256(), paramMap['SALT'].toBinStr())[:7]
      # Scrub the intermediate bytes as soon as the output is built
      out,bin7 = SecureBinaryData(binary_to_base58(bin7 + hash256(bin7)[0])), None
      return out

   def hardcodeCheckPassphrase(passphrase):
      """ Verify the trailing check byte of a passphrase made above. """
      if isinstance(passphrase, basestring):
         pwd = base58_to_binary(passphrase)
      else:
         pwd = base58_to_binary(passphrase.toBinStr())

      isgood,pwd = (hash256(pwd[:7])[0] == pwd[-1]), None
      return isgood

   def hardcodeApplyKdf(secret):
      """ Stretch a passphrase through the fixed 16 MB, 1-iteration ROMix KDF. """
      if isinstance(secret, basestring):
         secret = SecureBinaryData(secret)
      kdf = KdfRomix()
      kdf.usePrecomputedKdfParams(paramMap['KDFBYTES'], 1, paramMap['SALT'])
      return kdf.DeriveKey(secret)

   def hardcodeMask(secret, passphrase=None, ekey=None):
      """ AES-CBC-encrypt `secret` with ekey (or the KDF-stretched passphrase). """
      if not ekey:
         ekey = hardcodeApplyKdf(passphrase)
      return CryptoAES().EncryptCBC(secret, ekey, paramMap['IV'])

   def hardcodeUnmask(secret, passphrase=None, ekey=None):
      """ Inverse of hardcodeMask. """
      if not ekey:
         ekey = hardcodeApplyKdf(passphrase)
      return CryptoAES().DecryptCBC(secret, ekey, paramMap['IV'])

   paramMap['FUNC_PWD'] = hardcodeCreateSecurePrintPassphrase
   paramMap['FUNC_KDF'] = hardcodeApplyKdf
   paramMap['FUNC_MASK'] = hardcodeMask
   paramMap['FUNC_UNMASK'] = hardcodeUnmask
   paramMap['FUNC_CHKPWD'] = hardcodeCheckPassphrase
   return paramMap
+
+
+
+
+################################################################################
+################################################################################
+class SettingsFile(object):
+ """
+ This class could be replaced by the built-in QSettings in PyQt, except
+ that older versions of PyQt do not support the QSettings (or at least
+ I never figured it out). Easy enough to do it here
+
+   All settings must be populated with a simple datatype -- non-simple
+ datatypes should be broken down into pieces that are simple: numbers
+ and strings, or lists/tuples of them.
+
+ Will write all the settings to file. Each line will look like:
+ SingleValueSetting1 | 3824.8
+ SingleValueSetting2 | this is a string
+ Tuple Or List Obj 1 | 12 $ 43 $ 13 $ 33
+ Tuple Or List Obj 2 | str1 $ another str
+ """
+
+ #############################################################################
+ def __init__(self, path=None):
+ self.settingsPath = path
+ self.settingsMap = {}
+ if not path:
+ self.settingsPath = os.path.join(ARMORY_HOME_DIR, 'ArmorySettings.txt')
+
+ LOGINFO('Using settings file: %s', self.settingsPath)
+ if os.path.exists(self.settingsPath):
+ self.loadSettingsFile(path)
+
+
+
+ #############################################################################
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print indstr + 'Settings:'
+ for k,v in self.settingsMap.iteritems():
+ print indstr + indent + k.ljust(15), v
+
+
+ #############################################################################
+ def hasSetting(self, name):
+ return self.settingsMap.has_key(name)
+
+ #############################################################################
+ def set(self, name, value):
+ if isinstance(value, tuple):
+ self.settingsMap[name] = list(value)
+ else:
+ self.settingsMap[name] = value
+ self.writeSettingsFile()
+
+ #############################################################################
+ def extend(self, name, value):
+ """ Adds/converts setting to list, appends value to the end of it """
+ if not self.settingsMap.has_key(name):
+ if isinstance(value, list):
+ self.set(name, value)
+ else:
+ self.set(name, [value])
+ else:
+ origVal = self.get(name, expectList=True)
+ if isinstance(value, list):
+ origVal.extend(value)
+ else:
+ origVal.append(value)
+ self.settingsMap[name] = origVal
+ self.writeSettingsFile()
+
+ #############################################################################
+ def get(self, name, expectList=False):
+ if not self.hasSetting(name) or self.settingsMap[name]=='':
+ return ([] if expectList else '')
+ else:
+ val = self.settingsMap[name]
+ if expectList:
+ if isinstance(val, list):
+ return val
+ else:
+ return [val]
+ else:
+ return val
+
+ #############################################################################
+ def getAllSettings(self):
+ return self.settingsMap
+
+ #############################################################################
+ def getSettingOrSetDefault(self, name, defaultVal, expectList=False):
+ output = defaultVal
+ if self.hasSetting(name):
+ output = self.get(name)
+ else:
+ self.set(name, defaultVal)
+
+ return output
+
+
+
+ #############################################################################
+ def delete(self, name):
+ if self.hasSetting(name):
+ del self.settingsMap[name]
+ self.writeSettingsFile()
+
+ #############################################################################
+ def writeSettingsFile(self, path=None):
+ if not path:
+ path = self.settingsPath
+ f = open(path, 'w')
+ for key,val in self.settingsMap.iteritems():
+ try:
+ # Skip anything that throws an exception
+ valStr = ''
+ if isinstance(val, basestring):
+ valStr = val
+ elif isinstance(val, int) or \
+ isinstance(val, float) or \
+ isinstance(val, long):
+ valStr = str(val)
+ elif isinstance(val, list) or \
+ isinstance(val, tuple):
+ valStr = ' $ '.join([str(v) for v in val])
+ f.write(key.ljust(36))
+ f.write(' | ')
+ f.write(toBytes(valStr))
+ f.write('\n')
+ except:
+ LOGEXCEPT('Invalid entry in SettingsFile... skipping')
+ f.close()
+
+
+ #############################################################################
+ def loadSettingsFile(self, path=None):
+ if not path:
+ path = self.settingsPath
+
+ if not os.path.exists(path):
+ raise FileExistsError('Settings file DNE:' + path)
+
+ f = open(path, 'rb')
+ sdata = f.read()
+ f.close()
+
+ # Automatically convert settings to numeric if possible
+ def castVal(v):
+ v = v.strip()
+ a,b = v.isdigit(), v.replace('.','').isdigit()
+ if a:
+ return int(v)
+ elif b:
+ return float(v)
+ else:
+ if v.lower()=='true':
+ return True
+ elif v.lower()=='false':
+ return False
+ else:
+ return toUnicode(v)
+
+
+ sdata = [line.strip() for line in sdata.split('\n')]
+ for line in sdata:
+ if len(line.strip())==0:
+ continue
+
+ try:
+ key,vals = line.split('|')
+ valList = [castVal(v) for v in vals.split('$')]
+ if len(valList)==1:
+ self.settingsMap[key.strip()] = valList[0]
+ else:
+ self.settingsMap[key.strip()] = valList
+ except:
+ LOGEXCEPT('Invalid setting in %s (skipping...)', path)
+
+
+
+# Utility to create a file if it doesn't exist, or update its mtime (like Unix "touch")
+def touchFile(fname):
+ try:
+ os.utime(fname, None)
+ except:
+ f = open(fname, 'a')
+ f.flush()
+ os.fsync(f.fileno())
+ f.close()
+
+
+# NOTE: Had to put this in at the end so it was after the AllowAsync def
+# This flag takes into account both CLI_OPTIONs, and availability of the
+# BitTornado library (the user can remove the BitTornado dir and/or the
+# torrentDL.py files without breaking Armory, it will simply set this
+# disable flag to true)
+class FakeTDM(object):
+ def __init__(self):
+ self.isRunning = lambda: False
+ self.isStarted = lambda: False
+ self.isFinished = lambda: False
+ self.getTDMState = lambda: 'Disabled'
+ self.removeOldTorrentFile = lambda: None
+
+
+DISABLE_TORRENTDL = CLI_OPTIONS.disableTorrent
+TheTDM = FakeTDM()
+try:
+ import torrentDL
+ TheTDM = torrentDL.TorrentDownloadManager()
+except:
+ LOGEXCEPT('Failed to import torrent downloader')
+ DISABLE_TORRENTDL = True
+
+# We only use BITTORRENT for mainnet
+if USE_TESTNET:
+ DISABLE_TORRENTDL = True
+
+
+
+
diff --git a/armoryengine/BDM.py b/armoryengine/BDM.py
new file mode 100644
index 000000000..b6b5aff03
--- /dev/null
+++ b/armoryengine/BDM.py
@@ -0,0 +1,1458 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+import Queue
+import os.path
+import random
+import threading
+import traceback
+
+from armoryengine.ArmoryUtils import *
+from SDM import SatoshiDaemonManager
+from armoryengine.Timer import TimeThisFunction
+import CppBlockUtils as Cpp
+
+BDMcurrentBlock = [UINT32_MAX, 0]
+
+
+def getCurrTimeAndBlock():
+ time0 = long(RightNowUTC())
+ if TheBDM.getBDMState()=='BlockchainReady':
+ if BDMcurrentBlock[1]: return (time0, BDMcurrentBlock[0])
+ else: return (time0, TheBDM.getTopBlockHeight())
+ else:
+ return (time0, UINT32_MAX)
+
+################################################################################
+# Let's create a thread-wrapper for the blockchain utilities. Enable the
+# ability for multi-threaded blockchain scanning -- have a main thread and
+# a blockchain thread: blockchain can scan, and main thread will check back
+# every now and then to see if it's done
+BLOCKCHAINMODE = enum('Offline', \
+ 'Uninitialized', \
+ 'Full', \
+ 'Rescanning', \
+ 'LiteScanning', \
+ 'FullPrune', \
+ 'Lite')
+
+BDMINPUTTYPE = enum('RegisterAddr', \
+ 'ZeroConfTxToInsert', \
+ 'HeaderRequested', \
+ 'TxRequested', \
+ 'BlockRequested', \
+ 'AddrBookRequested', \
+ 'BlockAtHeightRequested', \
+ 'HeaderAtHeightRequested', \
+ 'ForceRebuild', \
+ 'RescanRequested', \
+ 'WalletRecoveryScan', \
+ 'UpdateWallets', \
+ 'ReadBlkUpdate', \
+ 'GoOnlineRequested', \
+ 'GoOfflineRequested', \
+ 'Passthrough', \
+ 'Reset', \
+ 'Shutdown')
+
+################################################################################
+class BlockDataManagerThread(threading.Thread):
+ """
+ A note about this class:
+
+ It was mainly created to allow for asynchronous blockchain scanning,
+   but the act of splitting the BDM into its own thread meant that ALL
+ communication with the BDM requires thread-safe access. So basically,
+ I had to wrap EVERYTHING. And then make it flexible.
+
+ For this reason, any calls not explicitly related to rescanning will
+ block by default, which could be a long time if the BDM is in the
+ middle of rescanning. For this reason, you are expected to either
+ pass wait=False if you just want to queue the function call and move
+ on in the main thread, or check the BDM state first, to make sure
+ it's not currently scanning and can expect immediate response.
+
+ This makes using the BDM much more complicated. But comes with the
+ benefit of all rescanning being able to happen in the background.
+ If you want to run it like single-threaded, you can use
+ TheBDM.setBlocking(True) and all calls will block. Always (unless
+ you pass wait=False explicitly to one of those calls).
+
+ Any calls that retrieve data from the BDM should block, even if you
+ technically can specify wait=False. This is because the class was
+ not designed to maintain organization of output data asynchronously.
+ So a call like TheBDM.getTopBlockHeader() will always block, and you
+ should check the BDM state if you want to make sure it returns
+   immediately.  Since there is only one main thread, there is really no
+ way for a rescan to be started between the time you check the state
+ and the time you call the method (so if you want to access the BDM
+ from multiple threads, this class will need some redesign).
+
+
+ This serves as a layer between the GUI and the Blockchain utilities.
+ If a request is made to mess with the BDM while it is in the
+ middle of scanning, it will queue it for when it's done
+
+ All private methods (those starting with two underscores, like __method),
+ are executed only by the BDM thread. These should never be called
+ externally, and are only safe to run when the BDM is ready to execute
+ them.
+
+ You can use any non-private methods at any time, and if you set wait=True,
+ the main thread will block until that operation is complete. If the BDM
+ is in the middle of a scan, the main thread could block for minutes until
+ the scanning is complete and then it processes your request.
+
+ Therefore, use setBlocking(True) to make sure you always wait/block after
+ every call, if you are interested in simplicity and don't mind waiting.
+
+ Use setBlocking(False) along with wait=False for the appropriate calls
+ to queue up your request and continue the main thread immediately. You
+ can finish up something else, and then come back and check whether the
+ job is finished (usually using TheBDM.getBDMState()=='BlockchainReady')
+
+ Any methods not defined explicitly in this class will "passthrough" the
+ __getattr__() method, which will then call that exact method name on
+ the BDM. All calls block by default. All such calls can also include
+ wait=False if you want to queue it and then continue asynchronously.
+
+
+ Implementation notes:
+
+ Before the multi-threaded BDM, there was wallets, and there was the BDM.
+ We always gave the wallets to the BDM and said "please search the block-
+ chain for relevant transactions". Now that this is asynchronous, the
+ calling thread is going to queue the blockchain scan, and then run off
+ and do other things: which may include address/wallet operations that
+ would collide with the BDM updating it.
+
+ THEREFORE, the BDM now has a single, master wallet. Any address you add
+ to any of your wallets, should be added to the master wallet, too. The
+ PyBtcWallet class does this for you, but if you are using raw BtcWallets
+ (the C++ equivalent), you need to do:
+
+ cppWallet.addScrAddress_1_(Hash160ToScrAddr(newAddr))
+ TheBDM.registerScrAddr(newAddr, isFresh=?)
+
+ This will add the address to the TheBDM.masterCppWallet. Then when you
+ queue up the TheBDM to do a rescan (if necessary), it will update only
+ its own wallet. Luckily, I designed the BDM so that data for addresses
+ in one wallet (the master), can be applied immediately to other/new
+ wallets that have the same addresses.
+
+ If you say isFresh=False, then the BDM will set isDirty=True. This means
+ that a full rescan will have to be performed, and wallet information may
+ not be accurate until it is performed. isFresh=True should be used for
+ addresses/wallets you just created, and thus there's no reason to rescan,
+ because there's no chance they could have any history in the blockchain.
+
+ Tying this all together: if you add an address to a PYTHON wallet, you
+ just add it through an existing call. If you add it with a C++ wallet,
+ you need to explicitly register it with TheBDM, too. Then you need to
+ tell the BDM to do a rescan (if isDirty==True), and then call the method
+      updateWalletsAfterScan() to apply the scan results. Once the wallets
+      are ready, you can check their balances and transaction histories.
+
+ """
+ #############################################################################
+ def __init__(self, isOffline=False, blocking=False):
+ super(BlockDataManagerThread, self).__init__()
+
+ if isOffline:
+ self.blkMode = BLOCKCHAINMODE.Offline
+ self.prefMode = BLOCKCHAINMODE.Offline
+ else:
+ self.blkMode = BLOCKCHAINMODE.Uninitialized
+ self.prefMode = BLOCKCHAINMODE.Full
+
+ self.bdm = Cpp.BlockDataManager().getBDM()
+
+ # These are for communicating with the master (GUI) thread
+ self.inputQueue = Queue.Queue()
+ self.outputQueue = Queue.Queue()
+
+ # Flags
+ self.startBDM = False
+ self.doShutdown = False
+ self.aboutToRescan = False
+ self.errorOut = 0
+
+ self.setBlocking(blocking)
+
+ self.currentActivity = 'None'
+
+ # Lists of wallets that should be checked after blockchain updates
+ self.pyWltList = [] # these will be python refs
+ self.cppWltList = [] # these will be python refs
+
+ # The BlockDataManager is easier to use if you put all your addresses
+      # into a C++ BtcWallet object, and let it track them all in one place
+ self.masterCppWallet = Cpp.BtcWallet()
+ self.bdm.registerWallet(self.masterCppWallet)
+
+ self.btcdir = BTC_HOME_DIR
+ self.ldbdir = LEVELDB_DIR
+ self.lastPctLoad = 0
+
+
+
+
+ #############################################################################
+ def __getattr__(self, name):
+ '''
+ Anything that is not explicitly defined in this class should
+ passthrough to the C++ BlockDataManager class
+
+ This remaps such calls into "passthrough" requests via the input
+ queue. This makes sure that the requests are processed only when
+ the BDM is ready. Hopefully, this will prevent multi-threaded
+ disasters, such as seg faults due to trying to read memory that is
+ in the process of being updated.
+
+ Specifically, any passthrough call is expected to return output
+ unless you add 'waitForReturn=False' to the arg list. i.e. all
+ calls that "passthrough" will always block unless you explicitly
+ tell it not to.
+ '''
+
+
+ rndID = int(random.uniform(0,100000000))
+ if not hasattr(self.bdm, name):
+ LOGERROR('No BDM method: %s', name)
+ raise AttributeError
+ else:
+ def passthruFunc(*args, **kwargs):
+ #LOGDEBUG('External thread requesting: %s (%d)', name, rndID)
+ waitForReturn = True
+ if len(kwargs)>0 and \
+ kwargs.has_key('wait') and \
+ not kwargs['wait']:
+ waitForReturn = False
+
+
+ # If this was ultimately called from the BDM thread, don't go
+ # through the queue, just do it!
+ if len(kwargs)>0 and \
+ kwargs.has_key('calledFromBDM') and \
+ kwargs['calledFromBDM']:
+ return getattr(self.bdm, name)(*args)
+
+ self.inputQueue.put([BDMINPUTTYPE.Passthrough, rndID, waitForReturn, name] + list(args))
+
+
+ if waitForReturn:
+ try:
+ out = self.outputQueue.get(True, self.mtWaitSec)
+ return out
+ except Queue.Empty:
+ LOGERROR('BDM was not ready for your request! Waited %d sec.' % self.mtWaitSec)
+ LOGERROR(' getattr name: %s', name)
+ LOGERROR('BDM currently doing: %s (%d)', self.currentActivity,self.currentID )
+ LOGERROR('Waiting for completion: ID= %d', rndID)
+ LOGERROR('Direct traceback')
+ traceback.print_stack()
+ self.errorOut += 1
+ LOGEXCEPT('Traceback:')
+ return passthruFunc
+
+
+
+ #############################################################################
+ def waitForOutputIfNecessary(self, expectOutput, rndID=0):
+ # The get() command will block until the thread puts something there.
+ # We don't always expect output, but we use this method to
+ # replace inputQueue.join(). The reason for doing it is so
+ # that we can guarantee that BDM thread knows whether we are waiting
+ # for output or not, and any additional requests put on the inputQueue
+ # won't extend our wait time for this request
+ if expectOutput:
+ try:
+ return self.outputQueue.get(True, self.mtWaitSec)
+ except Queue.Empty:
+ stkOneUp = traceback.extract_stack()[-2]
+ filename,method = stkOneUp[0], stkOneUp[1]
+ LOGERROR('Waiting for BDM output that didn\'t come after %ds.' % self.mtWaitSec)
+ LOGERROR('BDM state is currently: %s', self.getBDMState())
+ LOGERROR('Called from: %s:%d (%d)', os.path.basename(filename), method, rndID)
+ LOGERROR('BDM currently doing: %s (%d)', self.currentActivity, self.currentID)
+ LOGERROR('Direct traceback')
+ traceback.print_stack()
+ LOGEXCEPT('Traceback:')
+ self.errorOut += 1
+ else:
+ return None
+
+
+ #############################################################################
+ def setBlocking(self, doblock=True, newTimeout=MT_WAIT_TIMEOUT_SEC):
+ """
+ If we want TheBDM to behave as a single-threaded app, we need to disable
+ the timeouts so that long operations (such as reading the blockchain) do
+ not crash the process.
+
+ So setting wait=True is NOT identical to setBlocking(True), since using
+ wait=True with blocking=False will break when the timeout has been reached
+ """
+ if doblock:
+ self.alwaysBlock = True
+ self.mtWaitSec = None
+ else:
+ self.alwaysBlock = False
+ self.mtWaitSec = newTimeout
+
+
+ #############################################################################
+ def Reset(self, wait=None):
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.Reset, rndID, expectOutput] )
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+ #############################################################################
+ def getBlkMode(self):
+ return self.blkMode
+
+ #############################################################################
+ def getBDMState(self):
+ if self.blkMode == BLOCKCHAINMODE.Offline:
+ # BDM will not be able to provide any blockchain data, or scan
+ return 'Offline'
+ elif self.blkMode == BLOCKCHAINMODE.Full and not self.aboutToRescan:
+ # The BDM is idle, waiting for things to do
+ return 'BlockchainReady'
+ elif self.blkMode == BLOCKCHAINMODE.LiteScanning and not self.aboutToRescan:
+ # The BDM is doing some processing but it is expected to be done within
+ # 0.1s. For instance, readBlkFileUpdate requires processing, but can be
+ # performed 100/sec. For the outside calling thread, this is not any
+ # different than BlockchainReady.
+ return 'BlockchainReady'
+ elif self.blkMode == BLOCKCHAINMODE.Rescanning or self.aboutToRescan:
+         # BDM is doing a FULL scan of the blockchain, and expected to take a long time
+
+ return 'Scanning'
+ elif self.blkMode == BLOCKCHAINMODE.Uninitialized and not self.aboutToRescan:
+ # BDM wants to be online, but the calling thread never initiated the
+ # loadBlockchain() call. Usually setOnlineMode, registerWallets, then
+ # load the blockchain.
+ return 'Uninitialized'
+ elif self.blkMode == BLOCKCHAINMODE.FullPrune:
+ # NOT IMPLEMENTED
+ return 'FullPrune'
+ elif self.blkMode == BLOCKCHAINMODE.Lite:
+ # NOT IMPLEMENTED
+ return 'Lite'
+ else:
+ return '' % self.blkMode
+
+
+ #############################################################################
+ def predictLoadTime(self):
+ # Apparently we can't read the C++ state while it's scanning,
+ # specifically getLoadProgress* methods. Thus we have to resort
+ # to communicating via files... bleh
+ bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
+ if not os.path.exists(bfile):
+ return [-1,-1,-1,-1]
+
+ try:
+ with open(bfile,'r') as f:
+ tmtrx = [line.split() for line in f.readlines() if len(line.strip())>0]
+ phases = [float(row[0]) for row in tmtrx]
+ currPhase = phases[-1]
+ startat = [float(row[1]) for row in tmtrx if float(row[0])==currPhase]
+ sofar = [float(row[2]) for row in tmtrx if float(row[0])==currPhase]
+ total = [float(row[3]) for row in tmtrx if float(row[0])==currPhase]
+ times = [float(row[4]) for row in tmtrx if float(row[0])==currPhase]
+
+ todo = total[0] - startat[0]
+ pct0 = sofar[0] / todo
+ pct1 = sofar[-1] / todo
+ t0,t1 = times[0], times[-1]
+ if (not t1>t0) or todo<0:
+ return [-1,-1,-1,-1]
+ rate = (pct1-pct0) / (t1-t0)
+ tleft = (1-pct1)/rate
+ totalPct = (startat[-1] + sofar[-1]) / total[-1]
+ if not self.lastPctLoad == pct1:
+ LOGINFO('Reading blockchain, pct complete: %0.1f', 100*totalPct)
+ self.lastPctLoad = totalPct
+ return (currPhase,totalPct,rate,tleft)
+ except:
+ raise
+ return [-1,-1,-1,-1]
+
+
+
+
+ #############################################################################
+ def execCleanShutdown(self, wait=True):
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.Shutdown, rndID, expectOutput])
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+ #############################################################################
+ def setSatoshiDir(self, newBtcDir):
+ if not os.path.exists(newBtcDir):
+ LOGERROR('setSatoshiDir: directory does not exist: %s', newBtcDir)
+ return
+
+ if not self.blkMode in (BLOCKCHAINMODE.Offline, BLOCKCHAINMODE.Uninitialized):
+ LOGERROR('Cannot set blockchain/satoshi path after BDM is started')
+ return
+
+ self.btcdir = newBtcDir
+
+ #############################################################################
+ def setLevelDBDir(self, ldbdir):
+
+ if not self.blkMode in (BLOCKCHAINMODE.Offline, BLOCKCHAINMODE.Uninitialized):
+ LOGERROR('Cannot set blockchain/satoshi path after BDM is started')
+ return
+
+ if not os.path.exists(ldbdir):
+ os.makedirs(ldbdir)
+
+ self.ldbdir = ldbdir
+
+
+ #############################################################################
+ def setOnlineMode(self, goOnline=True, wait=None):
+ LOGINFO('Setting online mode: %s (wait=%s)' % (str(goOnline), str(wait)))
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+
+ if goOnline:
+ if TheBDM.getBDMState() in ('Offline','Uninitialized'):
+ self.inputQueue.put([BDMINPUTTYPE.GoOnlineRequested, rndID, expectOutput])
+ else:
+ if TheBDM.getBDMState() in ('Scanning','BlockchainReady'):
+ self.inputQueue.put([BDMINPUTTYPE.GoOfflineRequested, rndID, expectOutput])
+
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+ #############################################################################
+ def isScanning(self):
+ return (self.aboutToRescan or self.blkMode==BLOCKCHAINMODE.Rescanning)
+
+
+ #############################################################################
+ def readBlkFileUpdate(self, wait=True):
+ """
+ This method can be blocking... it always has been without a problem,
+ because the block file updates are always fast. But I have to assume
+ that it theoretically *could* take a while. Consider using wait=False
+ if you want it to do its thing and not wait for it (this matters, because
+ you'll want to call TheBDM.updateWalletsAfterScan() when this is
+      finished to make sure that the wallets reflect the latest data).
+ """
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.ReadBlkUpdate, rndID, expectOutput])
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+ #############################################################################
+ def isInitialized(self):
+ return self.blkMode==BLOCKCHAINMODE.Full and self.bdm.isInitialized()
+
+
+ #############################################################################
+ def isDirty(self):
+ return self.bdm.isDirty()
+
+
+
+ #############################################################################
+ def rescanBlockchain(self, scanType='AsNeeded', wait=None):
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ self.aboutToRescan = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.RescanRequested, rndID, expectOutput, scanType])
+ LOGINFO('Blockchain rescan requested')
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+ #############################################################################
+ def updateWalletsAfterScan(self, wait=True):
+ """
+ Be careful with this method: it is asking the BDM thread to update
+ the wallets in the main thread. If you do this with wait=False, you
+ need to avoid any wallet operations in the main thread until it's done.
+ However, this is usually very fast as long as you know the BDM is not
+ in the middle of a rescan, so you might as well set wait=True.
+
+ In fact, I highly recommend you always use wait=True, in order to
+ guarantee thread-safety.
+
+ NOTE: If there are multiple wallet-threads, this might not work. It
+ might require specifying which wallets to update after a scan,
+ so that other threads don't collide with the BDM updating its
+ wallet when called from this thread.
+ """
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.UpdateWallets, rndID, expectOutput])
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+ #############################################################################
+ def startWalletRecoveryScan(self, pywlt, wait=None):
+ """
+ A wallet recovery scan may require multiple, independent rescans. This
+ is because we don't know how many addresses to pre-calculate for the
+ initial scan. So, we will calculate the first X addresses in the wallet,
+ do a scan, and then if any addresses have tx history beyond X/2, calculate
+ another X and rescan. This will usually only have to be done once, but
+ may need to be repeated for super-active wallets.
+ (In the future, I may add functionality to sample the gap between address
+ usage, so I can more-intelligently determine when we're at the end...)
+ """
+
+
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ self.aboutToRescan = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.WalletRecoveryScan, rndID, expectOutput, pywlt])
+ LOGINFO('Wallet recovery scan requested')
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+
+ #############################################################################
+ def __checkBDMReadyToServeData(self):
+ if self.blkMode==BLOCKCHAINMODE.Rescanning:
+ LOGERROR('Requested blockchain data while scanning. Don\'t do this!')
+ LOGERROR('Check self.getBlkModeStr()==BLOCKCHAINMODE.Full before')
+ LOGERROR('making requests! Skipping request')
+ return False
+ if self.blkMode==BLOCKCHAINMODE.Offline:
+ LOGERROR('Requested blockchain data while BDM is in offline mode.')
+ LOGERROR('Please start the BDM using TheBDM.setOnlineMode() before,')
+ LOGERROR('and then wait for it to complete, before requesting data.')
+ return False
+ if not self.bdm.isInitialized():
+ LOGERROR('The BDM thread declares the BDM is ready, but the BDM ')
+ LOGERROR('itself reports that it is not initialized! What is ')
+ LOGERROR('going on...?')
+ return False
+
+
+ return True
+
+ #############################################################################
+ def getTxByHash(self, txHash):
+ """
+ All calls that retrieve blockchain data are blocking calls. You have
+ no choice in the matter!
+ """
+ #if not self.__checkBDMReadyToServeData():
+ #return None
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.TxRequested, rndID, True, txHash])
+
+ try:
+ result = self.outputQueue.get(True, 10)
+ if result==None:
+ LOGERROR('Requested tx does not exist:\n%s', binary_to_hex(txHash))
+ return result
+ except Queue.Empty:
+ LOGERROR('Waited 10s for tx to be returned. Abort')
+ LOGERROR('ID: getTxByHash (%d)', rndID)
+ return None
+ #LOGERROR('Going to block until we get something...')
+ #return self.outputQueue.get(True)
+
+ return None
+
+
+ ############################################################################
+ def getHeaderByHash(self, headHash):
+ """
+ All calls that retrieve blockchain data are blocking calls. You have
+ no choice in the matter!
+ """
+ #if not self.__checkBDMReadyToServeData():
+ #return None
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.HeaderRequested, rndID, True, headHash])
+
+ try:
+ result = self.outputQueue.get(True, 10)
+ if result==None:
+ LOGERROR('Requested header does not exist:\n%s', \
+ binary_to_hex(headHash))
+ return result
+ except Queue.Empty:
+ LOGERROR('Waited 10s for header to be returned. Abort')
+ LOGERROR('ID: getTxByHash (%d)', rndID)
+ #LOGERROR('Going to block until we get something...')
+ #return self.outputQueue.get(True)
+
+ return None
+
+
+ #############################################################################
+ def getBlockByHash(self,headHash):
+ """
+ All calls that retrieve blockchain data are blocking calls. You have
+ no choice in the matter!
+
+ This retrives the full block, not just the header, encoded the same
+ way as it is in the blkXXXX.dat files (including magic bytes and
+ block 4-byte block size)
+ """
+ #if not self.__checkBDMReadyToServeData():
+ #return None
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.BlockRequested, rndID, True, headHash])
+
+ try:
+ result = self.outputQueue.get(True, 10)
+ if result==None:
+ LOGERROR('Requested block does not exist:\n%s', \
+ binary_to_hex(headHash))
+ return result
+ except Queue.Empty:
+ LOGERROR('Waited 10s for block to be returned. Abort')
+ LOGERROR('ID: getTxByHash (%d)', rndID)
+ #LOGERROR('Going to block until we get something...')
+ #return self.outputQueue.get(True)
+
+ return None
+
+
+ #############################################################################
+ def getAddressBook(self, wlt):
+ """
+ Address books are constructed from Blockchain data, which means this
+ must be a blocking method.
+ """
+ rndID = int(random.uniform(0,100000000))
+ if isinstance(wlt, PyBtcWallet):
+ self.inputQueue.put([BDMINPUTTYPE.AddrBookRequested, rndID, True, wlt.cppWallet])
+ elif isinstance(wlt, Cpp.BtcWallet):
+ self.inputQueue.put([BDMINPUTTYPE.AddrBookRequested, rndID, True, wlt])
+
+ try:
+ result = self.outputQueue.get(True, self.mtWaitSec)
+ return result
+ except Queue.Empty:
+ LOGERROR('Waited %ds for addrbook to be returned. Abort' % self.mtWaitSec)
+ LOGERROR('ID: getTxByHash (%d)', rndID)
+ #LOGERROR('Going to block until we get something...')
+ #return self.outputQueue.get(True)
+
+ return None
+
+ #############################################################################
+ def addNewZeroConfTx(self, rawTx, timeRecv, writeToFile, wait=None):
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.ZeroConfTxToInsert, rndID, expectOutput, rawTx, timeRecv])
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+ #############################################################################
+ def registerScrAddr(self, scrAddr, isFresh=False, wait=None):
+ """
+ This is for a generic address: treat it as imported (requires rescan)
+ unless specifically specified otherwise
+ """
+ if isFresh:
+ self.registerNewScrAddr(scrAddr, wait=wait)
+ else:
+ self.registerImportedScrAddr(scrAddr, wait=wait)
+
+
+ #############################################################################
+ def registerNewScrAddr(self, scrAddr, wait=None):
+ """
+ Variable isFresh==True means the address was just [freshly] created,
+ and we need to watch for transactions with it, but we don't need
+ to rescan any blocks
+ """
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.RegisterAddr, rndID, expectOutput, scrAddr, True])
+
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+
+ #############################################################################
+ def registerImportedScrAddr(self, scrAddr, \
+ firstTime=UINT32_MAX, \
+ firstBlk=UINT32_MAX, \
+ lastTime=0, \
+ lastBlk=0, wait=None):
+ """
+ TODO: Need to clean up the first/last blk/time variables. Rather,
+ I need to make sure they are maintained and applied intelligently
+ and consistently
+ """
+ expectOutput = False
+ if not wait==False and (self.alwaysBlock or wait==True):
+ expectOutput = True
+
+ rndID = int(random.uniform(0,100000000))
+ self.inputQueue.put([BDMINPUTTYPE.RegisterAddr, rndID, expectOutput, scrAddr, \
+ [firstTime, firstBlk, lastTime, lastBlk]])
+
+ return self.waitForOutputIfNecessary(expectOutput, rndID)
+
+
+ #############################################################################
+   def registerWallet(self, wlt, isFresh=False, wait=None):
+      """
+      Register every address of a wallet with the BDM.
+
+      Accepts either a PyBtcWallet or a Cpp.BtcWallet.  isFresh=True means
+      the addresses were just created (no rescan needed); otherwise they
+      are treated as imported.  The wallet is also remembered in
+      pyWltList/cppWltList so it gets synced after blockchain updates.
+      """
+      if isinstance(wlt, PyBtcWallet):
+         # Convert each address to its scrAddr form before registering
+         scrAddrs = [Hash160ToScrAddr(a.getAddr160()) for a in wlt.getAddrList()]
+
+         if isFresh:
+            for scrad in scrAddrs:
+               self.registerNewScrAddr(scrad, wait=wait)
+         else:
+            for scrad in scrAddrs:
+               self.registerImportedScrAddr(scrad, wait=wait)
+
+         if not wlt in self.pyWltList:
+            self.pyWltList.append(wlt)
+
+      elif isinstance(wlt, Cpp.BtcWallet):
+         naddr = wlt.getNumScrAddr()
+
+         for a in range(naddr):
+            self.registerScrAddr(wlt.getScrAddrObjByIndex(a).getScrAddr(), isFresh, wait=wait)
+
+         if not wlt in self.cppWltList:
+            self.cppWltList.append(wlt)
+      else:
+         LOGERROR('Unrecognized object passed to registerWallet function')
+
+
+
+
+
+ #############################################################################
+ # These bdm_direct methods feel like a hack. They probably are. I need
+ # find an elegant way to get the code normally run outside the BDM thread,
+ # to be able to run inside the BDM thread without using the BDM queue (since
+ # the queue is specifically FOR non-BDM-thread calls). For now, the best
+ # I can do is create non-private versions of these methods that access BDM
+ # methods directly, but should not be used under any circumstances, unless
+ # we know for sure that the BDM ultimately called this method.
+   def registerScrAddr_bdm_direct(self, scrAddr, timeInfo):
+      """
+      Register an address immediately, bypassing the BDM input queue.
+
+      timeInfo is either a bool (isFresh flag) or a 4-element list of
+      [firstTime, firstBlk, lastTime, lastBlk] -- see __registerScrAddrNow.
+
+      Something went awry calling __registerScrAddrNow from the PyBtcWallet
+      code (apparently I don't understand __methods).  Use this method to
+      externally bypass the BDM thread queue and register the address
+      immediately.
+
+      THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
+      This method can be called from a non BDM class, but should only do so if
+      that class method was called by the BDM (thus, no conflicts)
+      """
+      self.__registerScrAddrNow(scrAddr, timeInfo)
+
+
+ #############################################################################
+   def scanBlockchainForTx_bdm_direct(self, cppWlt, startBlk=0, endBlk=UINT32_MAX):
+      """
+      THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
+      This method can be called from a non BDM class, but should only do so if
+      that class method was called by the BDM (thus, no conflicts)
+
+      NOTE(review): despite its name, this calls scanRegisteredTxForWallet
+      (identical to scanRegisteredTxForWallet_bdm_direct below), not
+      scanBlockchainForTx -- confirm whether that is intentional.
+      """
+      self.bdm.scanRegisteredTxForWallet(cppWlt, startBlk, endBlk)
+
+ #############################################################################
+   def scanRegisteredTxForWallet_bdm_direct(self, cppWlt, startBlk=0, endBlk=UINT32_MAX):
+      """
+      Scan the already-collected registered tx for the given C++ wallet
+      over the given block range.
+
+      THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
+      This method can be called from a non BDM class, but should only do so if
+      that class method was called by the BDM (thus, no conflicts)
+      """
+      self.bdm.scanRegisteredTxForWallet(cppWlt, startBlk, endBlk)
+
+ #############################################################################
+   def getTopBlockHeight_bdm_direct(self):
+      """
+      Return the current top block height straight from the C++ BDM.
+
+      THIS METHOD IS UNSAFE UNLESS CALLED FROM A METHOD RUNNING IN THE BDM THREAD
+      This method can be called from a non BDM class, but should only do so if
+      that class method was called by the BDM (thus, no conflicts)
+      """
+      return self.bdm.getTopBlockHeight()
+
+
+
+ #############################################################################
+   def getLoadProgress(self):
+      """
+      Return a (bytesLoaded, totalBlockchainBytes) pair from the C++ BDM.
+
+      This method does not actually work! The load progress in bytes is not
+      updated properly while the BDM thread is scanning. It might have to
+      emit this information explicitly in order to be useful.
+      """
+      return (self.bdm.getLoadProgressBytes(), self.bdm.getTotalBlockchainBytes())
+
+
+ #############################################################################
+   def __registerScrAddrNow(self, scrAddr, timeInfo):
+      """
+      Do the registration right now. This should not be called directly
+      outside of this class. This is only called by the BDM thread when
+      any previous scans have been completed.
+
+      timeInfo may be:
+         bool -- the isFresh flag (True: brand-new address, no rescan)
+         [firstTime, firstBlk, lastTime, lastBlk] -- imported-address hints
+      Anything else falls back to a full-rescan registration.
+      """
+
+      if isinstance(timeInfo, bool):
+         isFresh = timeInfo
+         if isFresh:
+            # We claimed to have just created this ScrAddr...(so no rescan needed)
+            self.masterCppWallet.addNewScrAddress_1_(scrAddr)
+         else:
+            self.masterCppWallet.addScrAddress_1_(scrAddr)
+      else:
+         if isinstance(timeInfo, (list,tuple)) and len(timeInfo)==4:
+            self.masterCppWallet.addScrAddress_5_(scrAddr, *timeInfo)
+         else:
+            LOGWARN('Unrecognized time information in register method.')
+            LOGWARN(' Data: %s', str(timeInfo))
+            LOGWARN('Assuming imported key requires full rescan...')
+            self.masterCppWallet.addScrAddress_1_(scrAddr)
+
+
+
+ #############################################################################
+ @TimeThisFunction
+ def __startLoadBlockchain(self):
+ """
+ This should only be called by the threaded BDM, and thus there should
+ never be a conflict.
+ """
+ if self.blkMode == BLOCKCHAINMODE.Rescanning:
+ LOGERROR('Blockchain is already scanning. Was this called already?')
+ return
+ elif self.blkMode == BLOCKCHAINMODE.Full:
+ LOGERROR('Blockchain has already been loaded -- maybe we meant')
+ LOGERROR('to call startRescanBlockchain()...?')
+ return
+ elif not self.blkMode == BLOCKCHAINMODE.Uninitialized:
+ LOGERROR('BDM should be in "Uninitialized" mode before starting ')
+ LOGERROR('the initial scan. If BDM is in offline mode, you should ')
+ LOGERROR('switch it to online-mode, first, then request the scan.')
+ LOGERROR('Continuing with the scan, anyway.')
+
+
+ # Remove "blkfiles.txt" to make sure we get accurate TGO
+ bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
+ if os.path.exists(bfile):
+ os.remove(bfile)
+
+ # Check for the existence of the Bitcoin-Qt directory
+ if not os.path.exists(self.btcdir):
+ raise FileExistsError, ('Directory does not exist: %s' % self.btcdir)
+
+ blkdir = os.path.join(self.btcdir, 'blocks')
+ blk1st = os.path.join(blkdir, 'blk00000.dat')
+
+ # ... and its blk000X.dat files
+ if not os.path.exists(blk1st):
+ LOGERROR('Blockchain data not available: %s', blk1st)
+ self.prefMode = BLOCKCHAINMODE.Offline
+ raise FileExistsError, ('Blockchain data not available: %s' % self.blk1st)
+
+ # We have the data, we're ready to go
+ self.blkMode = BLOCKCHAINMODE.Rescanning
+ self.aboutToRescan = False
+
+ armory_homedir = ARMORY_HOME_DIR
+ blockdir = blkdir
+ leveldbdir = self.ldbdir
+
+ if isinstance(ARMORY_HOME_DIR, unicode):
+ armory_homedir = ARMORY_HOME_DIR.encode('utf8')
+ if isinstance(blkdir, unicode):
+ blockdir = blkdir.encode('utf8')
+ if isinstance(self.ldbdir, unicode):
+ leveldbdir = self.ldbdir.encode('utf8')
+
+ LOGINFO('Setting Armory Home Dir: %s' % unicode(armory_homedir))
+ LOGINFO('Setting BlkFile Dir: %s' % unicode(blockdir))
+ LOGINFO('Setting LevelDB Dir: %s' % unicode(leveldbdir))
+
+ self.bdm.SetDatabaseModes(ARMORY_DB_BARE, DB_PRUNE_NONE);
+ self.bdm.SetHomeDirLocation(armory_homedir)
+ self.bdm.SetBlkFileLocation(blockdir)
+ self.bdm.SetLevelDBLocation(leveldbdir)
+ self.bdm.SetBtcNetworkParams( GENESIS_BLOCK_HASH, \
+ GENESIS_TX_HASH, \
+ MAGIC_BYTES)
+
+ # The master wallet contains all addresses of all wallets registered
+ self.bdm.registerWallet(self.masterCppWallet)
+
+ # Now we actually startup the BDM and run with it
+ if CLI_OPTIONS.rebuild:
+ self.bdm.doInitialSyncOnLoad_Rebuild()
+ elif CLI_OPTIONS.rescan:
+ self.bdm.doInitialSyncOnLoad_Rescan()
+ else:
+ self.bdm.doInitialSyncOnLoad()
+
+ # The above op populates the BDM with all relevent tx, but those tx
+ # still need to be scanned to collect the wallet ledger and UTXO sets
+ self.bdm.scanBlockchainForTx(self.masterCppWallet)
+ self.bdm.saveScrAddrHistories()
+
+
+ #############################################################################
+   @TimeThisFunction
+   def __startRescanBlockchain(self, scanType='AsNeeded'):
+      """
+      Rescan ('AsNeeded'), force-rescan ('ForceRescan') or force-rebuild
+      ('ForceRebuild') the blockchain databases, then re-scan the master
+      wallet and persist the address histories.
+
+      This should only be called by the threaded BDM, and thus there should
+      never be a conflict.
+
+      If we don't force a full scan, we let TheBDM figure out how much of the
+      chain needs to be rescanned. Which may not be very much. We may
+      force a full scan if we think there's an issue with balances.
+      """
+      if self.blkMode==BLOCKCHAINMODE.Offline:
+         LOGERROR('Blockchain is in offline mode. How can we rescan?')
+      elif self.blkMode==BLOCKCHAINMODE.Uninitialized:
+         LOGERROR('Blockchain was never loaded. Why did we request rescan?')
+
+      # Remove "blkfiles.txt" to make sure we get accurate TGO
+      bfile = os.path.join(ARMORY_HOME_DIR,'blkfiles.txt')
+      if os.path.exists(bfile):
+         os.remove(bfile)
+
+      if not self.isDirty():
+         LOGWARN('It does not look like we need a rescan... doing it anyway')
+
+      if scanType=='AsNeeded':
+         # 144 blocks is roughly one day's worth
+         if self.bdm.numBlocksToRescan(self.masterCppWallet) < 144:
+            LOGINFO('Rescan requested, but <1 day\'s worth of block to rescan')
+            self.blkMode = BLOCKCHAINMODE.LiteScanning
+         else:
+            LOGINFO('Rescan requested, and very large scan is necessary')
+            self.blkMode = BLOCKCHAINMODE.Rescanning
+
+
+      self.aboutToRescan = False
+
+      if scanType=='AsNeeded':
+         self.bdm.doSyncIfNeeded()
+      elif scanType=='ForceRescan':
+         LOGINFO('Forcing full rescan of blockchain')
+         self.bdm.doFullRescanRegardlessOfSync()
+         self.blkMode = BLOCKCHAINMODE.Rescanning
+      elif scanType=='ForceRebuild':
+         LOGINFO('Forcing full rebuild of blockchain database')
+         self.bdm.doRebuildDatabases()
+         self.blkMode = BLOCKCHAINMODE.Rescanning
+
+      # missingBlocks = self.bdm.missingBlockHashes()
+
+      self.bdm.scanBlockchainForTx(self.masterCppWallet)
+      self.bdm.saveScrAddrHistories()
+
+
+ #############################################################################
+   @TimeThisFunction
+   def __startRecoveryRescan(self, pywlt):
+      """
+      Run a wallet-recovery scan for a python wallet (find the highest
+      used address index), then re-scan the master wallet's registered tx.
+
+      This should only be called by the threaded BDM, and thus there should
+      never be a conflict.
+
+      In order to work cleanly with the threaded BDM, the search code
+      needed to be integrated directly here, instead of being called
+      from the PyBtcWallet method. Because that method is normally called
+      from outside the BDM thread, but this method is only called from
+      _inside_ the BDM thread. Those calls use the BDM stack which will
+      deadlock waiting for the itself before it can move on...
+
+      Unfortunately, because of this, we have to break a python-class
+      privacy rules: we are accessing the PyBtcWallet object as if this
+      were PyBtcWallet code (accessing properties directly).
+      """
+      if not isinstance(pywlt, PyBtcWallet):
+         LOGERROR('Only python wallets can be passed for recovery scans')
+         return
+
+      if self.blkMode==BLOCKCHAINMODE.Offline:
+         LOGERROR('Blockchain is in offline mode. How can we rescan?')
+      elif self.blkMode==BLOCKCHAINMODE.Uninitialized:
+         LOGERROR('Blockchain was never loaded. Why did we request rescan?')
+
+
+      self.blkMode = BLOCKCHAINMODE.Rescanning
+      self.aboutToRescan = False
+
+      #####
+
+      # Whenever calling PyBtcWallet methods from BDM, set flag
+      prevCalledFromBDM = pywlt.calledFromBDM
+      pywlt.calledFromBDM = True
+
+      # Do the scan...
+      pywlt.freshImportFindHighestIndex()
+
+      # Unset flag when done (restore whatever value it had before)
+      pywlt.calledFromBDM = prevCalledFromBDM
+
+      #####
+      self.bdm.scanRegisteredTxForWallet(self.masterCppWallet)
+
+
+
+ #############################################################################
+   @TimeThisFunction
+   def __readBlockfileUpdates(self):
+      '''
+      Pull any new blocks out of the blk*.dat files.  Returns the number
+      of new blocks read (or None if the BDM is offline).
+
+      This method can be blocking... it always has been without a problem,
+      because the block file updates are always fast. But I have to assume
+      that it theoretically *could* take a while, and the caller might care.
+      '''
+      if self.blkMode == BLOCKCHAINMODE.Offline:
+         LOGERROR('Can\'t update blockchain in %s mode!', self.getBDMState())
+         return
+
+      self.blkMode = BLOCKCHAINMODE.LiteScanning
+      nblk = self.bdm.readBlkFileUpdate()
+
+      # On new blocks, re-save the histories
+      # ACR: This was removed because the histories get saved already on the
+      # call to TheBDM.updateWalletsAfterScan()
+      #if nblk > 0:
+         #self.bdm.saveScrAddrHistories()
+
+      return nblk
+
+
+ #############################################################################
+   @TimeThisFunction
+   def __updateWalletsAfterScan(self):
+      """
+      Sync every registered python and C++ wallet with the current chain
+      state, then persist the address histories.
+
+      This will actually do a scan regardless of whether it is currently
+      "after scan", but it will usually only be requested right after a
+      full rescan
+      """
+
+      # Figure out how far behind the most out-of-date wallet is
+      numToRescan = 0
+      for pyWlt in self.pyWltList:
+         thisNum = self.bdm.numBlocksToRescan(pyWlt.cppWallet)
+         numToRescan = max(numToRescan, thisNum)
+
+      for cppWlt in self.cppWltList:
+         thisNum = self.bdm.numBlocksToRescan(cppWlt)
+         numToRescan = max(numToRescan, thisNum)
+
+      # 144 blocks is roughly one day's worth
+      if numToRescan<144:
+         self.blkMode = BLOCKCHAINMODE.LiteScanning
+      else:
+         self.blkMode = BLOCKCHAINMODE.Rescanning
+
+
+      for pyWlt in self.pyWltList:
+         pyWlt.syncWithBlockchain()
+
+      for cppWlt in self.cppWltList:
+         # The pre-leveldb version of Armory specifically required to call
+         #
+         #    scanRegisteredTxForWallet   (scan already-collected reg tx)
+         #
+         # instead of
+         #
+         #    scanBlockchainForTx   (search for reg tx then scan)
+         #
+         # Because the second one will induce a full rescan to find all new
+         # registeredTx, if we recently imported an addr or wallet. If we
+         # imported but decided not to rescan yet, we want the first one,
+         # which only scans the registered tx that are already collected
+         # (including new blocks, but not previous blocks).
+         #
+         # However, with the leveldb stuff only supporting super-node, there
+         # is no rescanning, thus it's safe to always call scanBlockchainForTx,
+         # which grabs everything from the database almost instantaneously.
+         # However we may want to re-examine this after we implement new
+         # database modes of operation
+         #self.bdm.scanRegisteredTxForWallet(cppWlt)
+         self.bdm.scanBlockchainForTx(cppWlt)
+
+      # At this point all wallets should be 100% up-to-date, save the histories
+      # to be reloaded next time
+      self.bdm.saveScrAddrHistories()
+
+
+
+ #############################################################################
+ def __shutdown(self):
+ if not self.blkMode == BLOCKCHAINMODE.Rescanning:
+ self.bdm.saveScrAddrHistories()
+
+ self.__reset()
+ self.blkMode = BLOCKCHAINMODE.Offline
+ self.doShutdown = True
+
+ #############################################################################
+   def __fullRebuild(self):
+      """
+      Destroy the on-disk databases, reset all in-memory state, and
+      rebuild everything from the raw block files.
+      """
+      self.bdm.destroyAndResetDatabases()
+      self.__reset()
+      self.__startLoadBlockchain()
+
+ #############################################################################
+   def __reset(self):
+      """
+      Wipe the BDM and all registered wallets back to a clean slate.
+      Only called from within the BDM thread.
+      """
+      LOGERROR('Resetting BDM and all wallets')
+      self.bdm.Reset()
+
+      if self.blkMode in (BLOCKCHAINMODE.Full, BLOCKCHAINMODE.Rescanning):
+         # Uninitialized means we want to be online, but haven't loaded yet
+         self.blkMode = BLOCKCHAINMODE.Uninitialized
+      elif not self.blkMode==BLOCKCHAINMODE.Offline:
+         # NOTE(review): any other non-offline mode (e.g. Uninitialized,
+         # LiteScanning) returns here WITHOUT clearing the wallet lists or
+         # recreating the master wallet -- confirm this is intentional
+         return
+
+      self.bdm.resetRegisteredWallets()
+
+      # Flags
+      self.startBDM = False
+      #self.btcdir = BTC_HOME_DIR
+
+      # Lists of wallets that should be checked after blockchain updates
+      self.pyWltList = [] # these will be python refs
+      self.cppWltList = [] # these will be C++ refs
+
+
+      # The BlockDataManager is easier to use if you put all your addresses
+      # into a C++ BtcWallet object, and let it
+      self.masterCppWallet = Cpp.BtcWallet()
+      self.bdm.registerWallet(self.masterCppWallet)
+
+
+ #############################################################################
+ def __getFullBlock(self, headerHash):
+ headerObj = self.bdm.getHeaderByHash(headerHash)
+ if not headerObj:
+ return None
+
+ rawTxList = []
+ txList = headerObj.getTxRefPtrList()
+ for txref in txList:
+ tx = txref.getTxCopy()
+ rawTxList.append(tx.serialize())
+
+ numTxVarInt = len(rawTxList)
+ blockBytes = 80 + len(numTxVarInt) + sum([len(tx) for tx in rawTxList])
+
+ rawBlock = MAGIC_BYTES
+ rawBlock += int_to_hex(blockBytes, endOut=LITTLEENDIAN, widthBytes=4)
+ rawBlock += headerObj.serialize()
+ rawBlock += packVarInt(numTxVarInt)
+ rawBlock += ''.join(rawTxList)
+ return rawBlock
+
+
+ #############################################################################
+ def getBDMInputName(self, i):
+ for name in dir(BDMINPUTTYPE):
+ if getattr(BDMINPUTTYPE, name)==i:
+ return name
+
+ #############################################################################
+   @TimeThisFunction
+   def createAddressBook(self, cppWlt):
+      """Build and return the address book for a C++ wallet (timed)."""
+      return cppWlt.createAddressBook()
+
+   def run(self):
+      """
+      Main loop of the BDM thread: wait for requests on self.inputQueue,
+      dispatch them by BDMINPUTTYPE, and (when the caller asked for it)
+      push the result onto self.outputQueue.
+
+      This thread runs in an infinite loop, waiting for things to show up
+      on the self.inputQueue, and then processing those entries. If there
+      are no requests to the BDM from the main thread, this thread will just
+      sit idle (in a CPU-friendly fashion) until something does.
+      """
+
+      while not self.doShutdown:
+         # If there were any errors, we will have that many extra output
+         # entries on the outputQueue. We clear them off so that this
+         # thread can be re-sync'd with the main thread
+         try:
+            while self.errorOut>0:
+               self.outputQueue.get_nowait()
+               self.errorOut -= 1
+         except Queue.Empty:
+            LOGERROR('ErrorOut var over-represented number of errors!')
+            self.errorOut = 0
+
+
+         # Now start the main
+         try:
+            try:
+               inputTuple = self.inputQueue.get_nowait()
+               # If we don't error out, we have stuff to process right now
+            except Queue.Empty:
+               # We only switch to offline/full/uninitialized when the queue
+               # is empty. After that, then we block in a CPU-friendly way
+               # until data shows up on the Queue
+               if self.prefMode==BLOCKCHAINMODE.Full:
+                  if self.bdm.isInitialized():
+                     self.blkMode = BLOCKCHAINMODE.Full
+                  else:
+                     self.blkMode = BLOCKCHAINMODE.Uninitialized
+               else:
+                  self.blkMode = BLOCKCHAINMODE.Offline
+
+               self.currentActivity = 'None'
+
+               # Block until something shows up.
+               inputTuple = self.inputQueue.get()
+            except:
+               # NOTE(review): if this except fires, inputTuple may be
+               # unbound when read below -- confirm this path is unreachable
+               LOGERROR('Unknown error in BDM thread')
+
+
+
+            # The first list element is always the BDMINPUTTYPE (command)
+            # The second argument is whether the caller will be waiting
+            # for the output: which means even if it's None, we need to
+            # put something on the output queue.
+            cmd = inputTuple[0]
+            rndID = inputTuple[1]
+            expectOutput = inputTuple[2]
+            output = None
+
+            # Some variables that can be queried externally to figure out
+            # what the BDM is currently doing
+            self.currentActivity = self.getBDMInputName(inputTuple[0])
+            self.currentID = rndID
+
+            if cmd == BDMINPUTTYPE.RegisterAddr:
+               scrAddr,timeInfo = inputTuple[3:]
+               self.__registerScrAddrNow(scrAddr, timeInfo)
+
+            elif cmd == BDMINPUTTYPE.ZeroConfTxToInsert:
+               rawTx = inputTuple[3]
+               timeIn = inputTuple[4]
+               # Accept either a PyTx object or an already-serialized tx
+               if isinstance(rawTx, PyTx):
+                  rawTx = rawTx.serialize()
+               self.bdm.addNewZeroConfTx(rawTx, timeIn, True)
+
+            elif cmd == BDMINPUTTYPE.HeaderRequested:
+               headHash = inputTuple[3]
+               rawHeader = self.bdm.getHeaderByHash(headHash)
+               if rawHeader:
+                  output = rawHeader
+               else:
+                  output = None
+
+            elif cmd == BDMINPUTTYPE.TxRequested:
+               txHash = inputTuple[3]
+               rawTx = self.bdm.getTxByHash(txHash)
+               if rawTx:
+                  output = rawTx
+               else:
+                  output = None
+
+            elif cmd == BDMINPUTTYPE.BlockRequested:
+               headHash = inputTuple[3]
+               rawBlock = self.__getFullBlock(headHash)
+               if rawBlock:
+                  output = rawBlock
+               else:
+                  output = None
+                  LOGERROR('Requested header does not exist:\n%s', \
+                                    binary_to_hex(headHash))
+
+            elif cmd == BDMINPUTTYPE.HeaderAtHeightRequested:
+               height = inputTuple[3]
+               rawHeader = self.bdm.getHeaderByHeight(height)
+               if rawHeader:
+                  output = rawHeader
+               else:
+                  output = None
+                  LOGERROR('Requested header does not exist:\nHeight=%s', height)
+
+            elif cmd == BDMINPUTTYPE.BlockAtHeightRequested:
+               height = inputTuple[3]
+               # NOTE(review): __getFullBlock expects a header *hash* but is
+               # handed a height here -- likely a bug; confirm the intended
+               # lookup (e.g. getHeaderByHeight first, then its hash)
+               rawBlock = self.__getFullBlock(height)
+               if rawBlock:
+                  output = rawBlock
+               else:
+                  output = None
+                  LOGERROR('Requested header does not exist:\nHeight=%s', height)
+
+            elif cmd == BDMINPUTTYPE.AddrBookRequested:
+               cppWlt = inputTuple[3]
+               output = self.createAddressBook(cppWlt)
+
+            elif cmd == BDMINPUTTYPE.UpdateWallets:
+               self.__updateWalletsAfterScan()
+
+            elif cmd == BDMINPUTTYPE.RescanRequested:
+               scanType = inputTuple[3]
+               if not scanType in ('AsNeeded', 'ForceRescan', 'ForceRebuild'):
+                  LOGERROR('Invalid scan type for rescanning: ' + scanType)
+                  scanType = 'AsNeeded'
+               self.__startRescanBlockchain(scanType)
+
+            elif cmd == BDMINPUTTYPE.WalletRecoveryScan:
+               LOGINFO('Wallet Recovery Scan Requested')
+               pywlt = inputTuple[3]
+               self.__startRecoveryRescan(pywlt)
+
+            elif cmd == BDMINPUTTYPE.ReadBlkUpdate:
+               output = self.__readBlockfileUpdates()
+
+            elif cmd == BDMINPUTTYPE.Passthrough:
+               # If the caller is waiting, then it is notified by output
+               funcName = inputTuple[3]
+               funcArgs = inputTuple[4:]
+               output = getattr(self.bdm, funcName)(*funcArgs)
+
+            elif cmd == BDMINPUTTYPE.Shutdown:
+               LOGINFO('Shutdown Requested')
+               self.__shutdown()
+
+            elif cmd == BDMINPUTTYPE.ForceRebuild:
+               LOGINFO('Rebuild databases requested')
+               self.__fullRebuild()
+
+            elif cmd == BDMINPUTTYPE.Reset:
+               LOGINFO('Reset Requested')
+               self.__reset()
+
+            elif cmd == BDMINPUTTYPE.GoOnlineRequested:
+               LOGINFO('Go online requested')
+               # This only sets the blkMode to what will later be
+               # recognized as online-requested, or offline
+               self.prefMode = BLOCKCHAINMODE.Full
+               if self.bdm.isInitialized():
+                  # The BDM was started and stopped at one point, without
+                  # being reset. It can safely pick up from where it
+                  # left off
+                  self.__readBlockfileUpdates()
+               else:
+                  self.blkMode = BLOCKCHAINMODE.Uninitialized
+                  self.__startLoadBlockchain()
+
+            elif cmd == BDMINPUTTYPE.GoOfflineRequested:
+               LOGINFO('Go offline requested')
+               self.prefMode = BLOCKCHAINMODE.Offline
+
+            self.inputQueue.task_done()
+            if expectOutput:
+               self.outputQueue.put(output)
+
+         except Queue.Empty:
+            continue
+         except:
+            # Any failure is logged and (if the caller is blocked waiting)
+            # a sentinel error value is pushed so the caller can unblock
+            inputName = self.getBDMInputName(inputTuple[0])
+            LOGERROR('Error processing BDM input')
+            #traceback.print_stack()
+            LOGERROR('Received inputTuple: ' + inputName + ' ' + str(inputTuple))
+            LOGERROR('Error processing ID (%d)', rndID)
+            LOGEXCEPT('ERROR:')
+            if expectOutput:
+               self.outputQueue.put('BDM_REQUEST_ERROR')
+            self.inputQueue.task_done()
+            continue
+
+      LOGINFO('BDM is shutdown.')
+
+
+
+
+
+################################################################################
+# Make TheBDM reference the asynchronous BlockDataManager wrapper (and TheSDM
+# the SatoshiDaemonManager) -- both are module-level singletons
+TheBDM = None
+TheSDM = None
+if CLI_OPTIONS.offline:
+   LOGINFO('Armory loaded in offline-mode. Will not attempt to load ')
+   LOGINFO('blockchain without explicit command to do so.')
+   TheBDM = BlockDataManagerThread(isOffline=True, blocking=False)
+   TheBDM.start()
+
+   # Also create the might-be-needed SatoshiDaemonManager
+   TheSDM = SatoshiDaemonManager()
+
+else:
+   # NOTE: "TheBDM" is sometimes used in the C++ code to reference the
+   # singleton BlockDataManager_LevelDB class object. Here,
+   # "TheBDM" refers to a python BlockDataManagerThead class
+   # object that wraps the C++ version. It implements some of
+   # it's own methods, and then passes through anything it
+   # doesn't recognize to the C++ object.
+   LOGINFO('Using the asynchronous/multi-threaded BlockDataManager.')
+   LOGINFO('Blockchain operations will happen in the background. ')
+   LOGINFO('Devs: check TheBDM.getBDMState() before asking for data.')
+   LOGINFO('Registering addresses during rescans will queue them for ')
+   LOGINFO('inclusion after the current scan is completed.')
+   TheBDM = BlockDataManagerThread(isOffline=False, blocking=False)
+   TheBDM.setDaemon(True)
+   TheBDM.start()
+
+   #if CLI_OPTIONS.doDebug or CLI_OPTIONS.netlog or CLI_OPTIONS.mtdebug:
+   cppLogFile = os.path.join(ARMORY_HOME_DIR, 'armorycpplog.txt')
+
+   # Frozen (py2exe/pyinstaller) builds hand a byte-string path to the C++ layer
+   cpplf = cppLogFile
+   if getattr(sys, 'frozen', False):
+      cpplf = cppLogFile.encode('utf8')
+
+   TheBDM.StartCppLogging(cpplf, 4)
+   TheBDM.EnableCppLogStdOut()
+
+   # 32-bit linux has an issue with max open files. Rather than modifying
+   # the system, we can tell LevelDB to take it easy with max files to open
+   if OS_LINUX and not SystemSpecs.IsX64:
+      LOGINFO('Lowering max-open-files parameter in LevelDB for 32-bit linux')
+      TheBDM.setMaxOpenFiles(75)
+
+   # Override the above if they explicitly specify it as CLI arg
+   if CLI_OPTIONS.maxOpenFiles > 0:
+      LOGINFO('Overriding max files via command-line arg')
+      TheBDM.setMaxOpenFiles( CLI_OPTIONS.maxOpenFiles )
+
+   #LOGINFO('LevelDB max-open-files is %d', TheBDM.getMaxOpenFiles())
+
+   # Also load the might-be-needed SatoshiDaemonManager
+   TheSDM = SatoshiDaemonManager()
+
+
+# Put the import at the end to avoid circular reference problem
+from armoryengine.PyBtcWallet import PyBtcWallet
+from armoryengine.Transaction import PyTx
+
diff --git a/armoryengine/BinaryPacker.py b/armoryengine/BinaryPacker.py
new file mode 100644
index 000000000..379fcb53c
--- /dev/null
+++ b/armoryengine/BinaryPacker.py
@@ -0,0 +1,84 @@
+################################################################################
+#
+# Copyright (C) 2011-2014, Armory Technologies, Inc.
+# Distributed under the GNU Affero General Public License (AGPL v3)
+# See LICENSE or http://www.gnu.org/licenses/agpl.html
+#
+################################################################################
+#
+# Project: Armory
+# Author: Alan Reiner
+# Website: www.bitcoinarmory.com
+# Orig Date: 20 November, 2011
+#
+################################################################################
+from armoryengine.ArmoryUtils import LITTLEENDIAN, int_to_binary, packVarInt
+UINT8, UINT16, UINT32, UINT64, INT8, INT16, INT32, INT64, VAR_INT, VAR_STR, FLOAT, BINARY_CHUNK = range(12)
+from struct import pack, unpack
+
+class PackerError(Exception): pass
+
+class BinaryPacker(object):
+
+   """
+   Class for helping load binary data into a stream. Typical usage is
+      >> bp = BinaryPacker()
+      >> bp.put(UINT32, 12)
+      >> bp.put(VAR_INT, 78)
+      >> bp.put(BINARY_CHUNK, '\x9f'*10)
+      >> ...etc...
+      >> result = bp.getBinaryString()
+   """
+   def __init__(self):
+      # List of string fragments, joined on demand by getBinaryString().
+      # Note: "self.binaryConcat += someStr" extends the list one character
+      # at a time (list += iterable of chars), which still joins correctly.
+      self.binaryConcat = []
+
+   def getSize(self):
+      # Total byte count across all accumulated fragments
+      return sum([len(a) for a in self.binaryConcat])
+
+   def getBinaryString(self):
+      # Concatenate all fragments into the final serialized string
+      return ''.join(self.binaryConcat)
+
+   def __str__(self):
+      return self.getBinaryString()
+
+
+   def put(self, varType, theData, width=None, endianness=LITTLEENDIAN):
+      """
+      Need to supply the argument type you are put'ing into the stream.
+      Values of BINARY_CHUNK will automatically detect the size as necessary
+
+      Use width=X to include padding of BINARY_CHUNKs w/ 0x00 bytes
+
+      Raises PackerError for unknown varType or oversized fixed-width data.
+      """
+      E = endianness
+      if varType == UINT8:
+         self.binaryConcat += int_to_binary(theData, 1, endianness)
+      elif varType == UINT16:
+         self.binaryConcat += int_to_binary(theData, 2, endianness)
+      elif varType == UINT32:
+         self.binaryConcat += int_to_binary(theData, 4, endianness)
+      elif varType == UINT64:
+         self.binaryConcat += int_to_binary(theData, 8, endianness)
+      elif varType == INT8:
+         self.binaryConcat += pack(E+'b', theData)
+      elif varType == INT16:
+         self.binaryConcat += pack(E+'h', theData)
+      elif varType == INT32:
+         self.binaryConcat += pack(E+'i', theData)
+      elif varType == INT64:
+         self.binaryConcat += pack(E+'q', theData)
+      elif varType == VAR_INT:
+         # packVarInt returns [binaryStr, nBytes]; only the bytes are kept
+         self.binaryConcat += packVarInt(theData)[0]
+      elif varType == VAR_STR:
+         # var_str is a var_int length prefix followed by the raw string
+         self.binaryConcat += packVarInt(len(theData))[0]
+         self.binaryConcat += theData
+      elif varType == FLOAT:
+         self.binaryConcat += pack(E+'f', theData)
+      elif varType == BINARY_CHUNK:
+         if width==None:
+            self.binaryConcat += theData
+         else:
+            if len(theData)>width:
+               raise PackerError, 'Too much data to fit into fixed width field'
+            # Right-pad with null bytes up to the requested fixed width
+            self.binaryConcat += theData.ljust(width, '\x00')
+      else:
+         raise PackerError, "Var type not recognized! VarType="+str(varType)
diff --git a/armoryengine/BinaryUnpacker.py b/armoryengine/BinaryUnpacker.py
new file mode 100644
index 000000000..2eac6ea06
--- /dev/null
+++ b/armoryengine/BinaryUnpacker.py
@@ -0,0 +1,130 @@
+################################################################################
+#
+# Copyright (C) 2011-2014, Armory Technologies, Inc.
+# Distributed under the GNU Affero General Public License (AGPL v3)
+# See LICENSE or http://www.gnu.org/licenses/agpl.html
+#
+################################################################################
+#
+# Project: Armory
+# Author: Alan Reiner
+# Website: www.bitcoinarmory.com
+# Orig Date: 20 November, 2011
+#
+################################################################################
+
+
+
+
+################################################################################
+################################################################################
+# Classes for reading and writing large binary objects
+################################################################################
+################################################################################
+from struct import pack, unpack
+from BinaryPacker import UINT8, UINT16, UINT32, UINT64, INT8, INT16, INT32, INT64, VAR_INT, VAR_STR, FLOAT, BINARY_CHUNK
+from armoryengine.ArmoryUtils import LITTLEENDIAN, unpackVarInt, LOGERROR
+
+class UnpackerError(Exception): pass
+
+# Seed this object with binary data, then read in its pieces sequentially
+class BinaryUnpacker(object):
+   """
+   Class for helping unpack binary streams of data. Typical usage is
+   >> bup = BinaryUnpacker(myBinaryData)
+   >> int32 = bup.get(UINT32)
+   >> int64 = bup.get(VAR_INT)
+   >> bytes10 = bup.get(BINARY_CHUNK, 10)
+   >> ...etc...
+   """
+   def __init__(self, binaryStr):
+      # Full data buffer plus a read cursor that advances with each get()
+      self.binaryStr = binaryStr
+      self.pos = 0
+
+   # Simple buffer/cursor accessors and manipulators
+   def getSize(self): return len(self.binaryStr)
+   def getRemainingSize(self): return len(self.binaryStr) - self.pos
+   def getBinaryString(self): return self.binaryStr
+   def getRemainingString(self): return self.binaryStr[self.pos:]
+   def append(self, binaryStr): self.binaryStr += binaryStr
+   def advance(self, bytesToAdvance): self.pos += bytesToAdvance
+   def rewind(self, bytesToRewind): self.pos -= bytesToRewind
+   def resetPosition(self, toPos=0): self.pos = toPos
+   def getPosition(self): return self.pos
+
+   def get(self, varType, sz=0, endianness=LITTLEENDIAN):
+      """
+      First argument is the data-type: UINT32, VAR_INT, etc.
+      If BINARY_CHUNK, need to supply a number of bytes to read, as well
+      """
+      def sizeCheck(sz):
+         # NOTE(review): the remainder of this method was garbled during
+         # extraction (text between '<'...'>' was stripped).  The lines
+         # below, up to printMerkleTree, actually belong to PyBlockData's
+         # Merkle-tree code from armoryengine/Block.py.  Restore this hunk
+         # from the upstream Armory sources before applying the patch.
+         if self.getRemainingSize() 1:
+         hashes = self.merkleTree[-sz:]
+         mod2 = sz%2
+         for i in range(sz/2):
+            self.merkleTree.append( hash256(hashes[2*i] + hashes[2*i+1]) )
+         if mod2==1:
+            self.merkleTree.append( hash256(hashes[-1] + hashes[-1]) )
+         sz = (sz+1) / 2
+      self.merkleRoot = self.merkleTree[-1]
+      return self.merkleRoot
+
+ def printMerkleTree(self, reverseHash=False, indent=''):
+ print indent + 'Printing Merkle Tree:'
+ if reverseHash:
+ print indent + '(hashes will be reversed, like shown on BlockExplorer.com)'
+ root = self.getMerkleRoot()
+ print indent + 'Merkle Root:', binary_to_hex(root)
+ for h in self.merkleTree:
+ phash = binary_to_hex(h) if not reverseHash else binary_to_hex(h, endOut=BIGENDIAN)
+ print indent + '\t' + phash
+
+
+   def pprint(self, nIndent=0, endian=BIGENDIAN):
+      # Pretty-print the block data.  'indent' here is presumably the
+      # module-level indent string from ArmoryUtils -- confirm; it is
+      # repeated nIndent times to build the leading whitespace.
+      indstr = indent*nIndent
+      print indstr + 'BlockData:'
+      print indstr + indent + 'MerkleRoot: ', binary_to_hex(self.getMerkleRoot(), endian), \
+                                  '(BE)' if endian==BIGENDIAN else '(LE)'
+      print indstr + indent + 'NumTx: ', self.numTx
+      for tx in self.txList:
+         tx.pprint(nIndent+1, endian=endian)
+
+
+################################################################################
+################################################################################
+class PyBlock(object):
+ def __init__(self, prevHeader=None, txlist=[]):
+ self.blockHeader = PyBlockHeader()
+ self.blockData = PyBlockData()
+ if prevHeader:
+ self.setPrevHeader(prevHeader)
+ if txlist:
+ self.setTxList(txlist)
+
+ def serialize(self):
+ assert( not self.blockHeader == UNINITIALIZED )
+ binOut = BinaryPacker()
+ binOut.put(BINARY_CHUNK, self.blockHeader.serialize())
+ binOut.put(BINARY_CHUNK, self.blockData.serialize())
+ return binOut.getBinaryString()
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ blkData = toUnpack
+ else:
+ blkData = BinaryUnpacker( toUnpack )
+
+ self.txList = []
+ self.blockHeader = PyBlockHeader().unserialize(blkData)
+ self.blockData = PyBlockData().unserialize(blkData)
+ return self
+
+ def getNumTx(self):
+ return len(self.blockData.txList)
+
+ def getSize(self):
+ return len(self.serialize())
+
+ # Not sure how useful these manual block-construction methods
+ # are. For now, I just need something with non-ridiculous vals
+ def setPrevHeader(self, prevHeader, copyAttr=True):
+ self.blockHeader.prevBlkHash = prevHeader.theHash
+ self.blockHeader.nonce = 0
+ if copyAttr:
+ self.blockHeader.version = prevHeader.version
+ self.blockHeader.timestamp = prevHeader.timestamp+600
+ self.blockHeader.diffBits = prevHeader.diffBits
+
+ def setTxList(self, txlist):
+ self.blockData = PyBlockData(txlist)
+ if not self.blockHeader == UNINITIALIZED:
+ self.blockHeader.merkleRoot = self.blockData.getMerkleRoot()
+
+ def tx(self, idx):
+ return self.blockData.txList[idx]
+
+ def pprint(self, nIndent=0, endian=BIGENDIAN):
+ indstr = indent*nIndent
+ print indstr + 'Block:'
+ self.blockHeader.pprint(nIndent+1, endian=endian)
+ self.blockData.pprint(nIndent+1, endian=endian)
+
diff --git a/armoryengine/CoinSelection.py b/armoryengine/CoinSelection.py
new file mode 100644
index 000000000..a01ffcc36
--- /dev/null
+++ b/armoryengine/CoinSelection.py
@@ -0,0 +1,744 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+################################################################################
+################################################################################
+#
+# SelectCoins algorithms
+#
+# The following methods define multiple ways that one could select coins
+# for a given transaction. However, the "best" solution is extremely
+# dependent on the variety of unspent outputs, and also the preferences
+# of the user. Things to take into account when selecting coins:
+#
+# - Number of inputs: If we have a lot of inputs in this transaction
+# from different addresses, then all those addresses
+# have now been linked together. We want to use
+# as few outputs as possible
+#
+# - Tx Fess/Size: The bigger the transaction, in bytes, the more
+# fee we're going to have to pay to the miners
+#
+# - Priority: Low-priority transactions might require higher
+# fees and/or take longer to make it into the
+# blockchain. Priority is the sum of TxOut
+# priorities: (NumConfirm * NumBTC / SizeKB)
+# We especially want to avoid 0-confirmation txs
+#
+# - Output values: In almost every transaction, we must return
+# change to ourselves. This means there will
+# be two outputs, one to the recipient, one to
+# us. We prefer that both outputs be about the
+# same size, so that it's not clear which is the
+# recipient, which is the change. But we don't
+# want to use too many inputs to do this.
+#
+# - Sustainability: We should pick a strategy that tends to leave our
+# wallet containing a variety of TxOuts that are
+# well-suited for future transactions to benefit.
+# For instance, always favoring the single TxOut
+# with a value close to the target, will result
+# in a future wallet full of tiny TxOuts. This
+# guarantees that in the future, we're going to
+# have to do 10+ inputs for a single Tx.
+#
+#
+# The strategy is to execute a half dozen different types of SelectCoins
+# algorithms, each with a different goal in mind. Then we examine each
+# of the results and evaluate a "select-score." Use the one with the
+# best score. In the future, we could make the scoring algorithm based
+# on user preferences. We expect that depending on what the availble
+# list looks like, some of these algorithms could produce perfect results,
+# and in other instances *terrible* results.
+#
+################################################################################
+################################################################################
+import math
+import random
+
+from armoryengine.ArmoryUtils import CheckHash160, binary_to_hex, coin2str, \
+ hash160_to_addrStr, ONE_BTC, CENT, int_to_binary, MIN_RELAY_TX_FEE, MIN_TX_FEE
+from armoryengine.Timer import TimeThisFunction
+from armoryengine.Transaction import *
+
+
+################################################################################
+# These would normally be defined by C++ and fed in, but I've recreated
+# the C++ class here... it's really just a container, anyway
+#
+# TODO: LevelDB upgrade: had to upgrade this class to use arbitrary
+# ScrAddress "notation", even though everything else on the python
+# side expects pure hash160 values. For now, it looks like it can
+# handle arbitrary scripts, but the CheckHash160() calls will
+# (correctly) throw errors if you don't. We can upgrade this in
+# the future.
+class PyUnspentTxOut(object):
+   # NOTE: the constructor arguments are currently ignored (body is just
+   # 'pass'); instances are expected to be populated via createFromCppUtxo.
+   def __init__(self, scrAddr='', val=-1, numConf=-1):
+      pass
+      #self.scrAddr = scrAddr
+      #self.val = long(val*ONE_BTC)
+      #self.conf = numConf
+   def createFromCppUtxo(self, cppUtxo):
+      # Copy fields out of the C++ UnspentTxOut object; returns self
+      self.scrAddr = cppUtxo.getRecipientScrAddr()
+      self.val = cppUtxo.getValue()
+      self.conf = cppUtxo.getNumConfirm()
+      # For now, this will throw errors unless we always use hash160 scraddrs
+      self.binScript = '\x76\xa9\x14' + CheckHash160(self.scrAddr) + '\x88\xac'
+      self.txHash = cppUtxo.getTxHash()
+      self.txOutIndex = cppUtxo.getTxOutIndex()
+      return self
+   def getTxHash(self):
+      return self.txHash
+   def getTxOutIndex(self):
+      return self.txOutIndex
+   def getValue(self):
+      return self.val
+   def getNumConfirm(self):
+      return self.conf
+   def getScript(self):
+      return self.binScript
+   def getRecipientScrAddr(self):
+      return self.scrAddr
+   def getRecipientHash160(self):
+      # Raises if scrAddr is not a plain P2PKH scrAddr (see CheckHash160)
+      return CheckHash160(self.scrAddr)
+   def prettyStr(self, indent=''):
+      # One-line summary: scrAddr prefix (hex), value, confirmations
+      pstr = [indent]
+      pstr.append(binary_to_hex(self.scrAddr[:8]))
+      pstr.append(coin2str(self.val))
+      pstr.append(str(self.conf).rjust(8,' '))
+      return ' '.join(pstr)
+   def pprint(self, indent=''):
+      print self.prettyStr(indent)
+
+
+################################################################################
+def sumTxOutList(txoutList):
+ return sum([u.getValue() for u in txoutList])
+
+################################################################################
+# This is really just for viewing a TxOut list -- usually for debugging
+# This is really just for viewing a TxOut list -- usually for debugging
+def pprintUnspentTxOutList(utxoList, headerLine='Coin Selection: '):
+   # Print a table of the UTXOs: address, value, confirmations, priority.
+   # The trailing commas on the print statements keep columns on one row
+   # (Python 2 print continuation).
+   totalSum = sum([u.getValue() for u in utxoList])
+   print headerLine, '(Total = %s BTC)' % coin2str(totalSum)
+   print ' ','Owner Address'.ljust(34),
+   print ' ','TxOutValue'.rjust(18),
+   print ' ','NumConf'.rjust(8),
+   print ' ','PriorityFactor'.rjust(16)
+   for utxo in utxoList:
+      # CheckHash160 throws unless scrAddr is a plain hash160 scrAddr
+      a160 = CheckHash160(utxo.getRecipientScrAddr())
+      print ' ',hash160_to_addrStr(a160).ljust(34),
+      print ' ',(coin2str(utxo.getValue()) + ' BTC').rjust(18),
+      print ' ',str(utxo.getNumConfirm()).rjust(8),
+      # Priority = value * confirmations, scaled by 1 BTC over 144 blocks
+      print ' ', ('%0.2f' % (utxo.getValue()*utxo.getNumConfirm()/(ONE_BTC*144.))).rjust(16)
+
+
+################################################################################
+# Sorting currently implemented in C++, but we implement a different kind, here
+def PySortCoins(unspentTxOutInfo, sortMethod=1):
+   """
+   Here we define a few different ways to sort a list of unspent TxOut objects.
+   Most of them are simple, some are more complex. In particular, the last
+   method (4) tries to be intelligent, by grouping together inputs from the
+   same address.
+
+   The goal is not to do the heavy lifting for SelectCoins... we simply need
+   a few different ways to sort coins so that the SelectCoins algorithms has
+   a variety of different inputs to play with. Each sorting method is useful
+   for some types of unspent-TxOut lists, so as long as we have one good
+   sort, the PyEvalCoinSelect method will pick it out.
+
+   As a precaution we send all the zero-confirmation UTXO's to the back
+   of the list, so that they will only be used if absolutely necessary.
+   """
+   zeroConfirm = []
+
+   # Methods 0-3: simple single-key descending sorts with different
+   # priority functions
+   if sortMethod==0:
+      priorityFn = lambda a: a.getValue() * a.getNumConfirm()
+      return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
+   if sortMethod==1:
+      # Cube root flattens the priority curve vs method 0
+      priorityFn = lambda a: (a.getValue() * a.getNumConfirm())**(1/3.)
+      return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
+   if sortMethod==2:
+      priorityFn = lambda a: (math.log(a.getValue()*a.getNumConfirm()+1)+4)**4
+      return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
+   if sortMethod==3:
+      # Pure value sort, zero-confirmation outputs pushed to the bottom
+      priorityFn = lambda a: a.getValue() if a.getNumConfirm()>0 else 0
+      return sorted(unspentTxOutInfo, key=priorityFn, reverse=True)
+   if sortMethod==4:
+      # Group UTXOs by receiving address, sort within each group, then
+      # order the groups by their best member; zero-conf goes last
+      addrMap = {}
+      zeroConfirm = []
+      for utxo in unspentTxOutInfo:
+         if utxo.getNumConfirm() == 0:
+            zeroConfirm.append(utxo)
+         else:
+            addr = script_to_addrStr(utxo.getScript())
+            if not addrMap.has_key(addr):
+               addrMap[addr] = [utxo]
+            else:
+               addrMap[addr].append(utxo)
+
+      priorityUTXO = (lambda a: (a.getNumConfirm()*a.getValue()**0.333))
+      for addr,txoutList in addrMap.iteritems():
+         txoutList.sort(key=priorityUTXO, reverse=True)
+
+      priorityGrp = lambda a: max([priorityUTXO(utxo) for utxo in a])
+      finalSortedList = []
+      for utxo in sorted(addrMap.values(), key=priorityGrp, reverse=True):
+         finalSortedList.extend(utxo)
+
+      finalSortedList.extend(zeroConfirm)
+      return finalSortedList
+   if sortMethod in (5, 6, 7):
+      utxoSorted = PySortCoins(unspentTxOutInfo, 1)
+      # Rotate the top 1,2 or 3 elements to the bottom of the list
+      for i in range(sortMethod-4):
+         utxoSorted.append(utxoSorted[0])
+         del utxoSorted[0]
+      return utxoSorted
+
+   # TODO: Add a semi-random sort method: it will favor putting high-priority
+   # outputs at the front of the list, but will not be deterministic
+   # This should give us some high-fitness variation compared to sorting
+   # uniformly
+   if sortMethod==8:
+      # Fully random shuffle of the confirmed UTXOs; zero-conf still last
+      utxosNoZC = filter(lambda a: a.getNumConfirm()!=0, unspentTxOutInfo)
+      random.shuffle(utxosNoZC)
+      utxosNoZC.extend(filter(lambda a: a.getNumConfirm()==0, unspentTxOutInfo))
+      return utxosNoZC
+   if sortMethod==9:
+      utxoSorted = PySortCoins(unspentTxOutInfo, 1)
+      sz = len(filter(lambda a: a.getNumConfirm()!=0, utxoSorted))
+      # swap 1/3 of the values at random
+      topsz = int(min(max(round(sz/3), 5), sz))
+      for i in range(topsz):
+         pick1 = int(random.uniform(0,topsz))
+         pick2 = int(random.uniform(0,sz-topsz))
+         utxoSorted[pick1], utxoSorted[pick2] = utxoSorted[pick2], utxoSorted[pick1]
+      return utxoSorted
+
+
+
+
+################################################################################
+# Now we try half a dozen different selection algorithms
+################################################################################
+
+
+
+################################################################################
+def PySelectCoins_SingleInput_SingleValue( \
+ unspentTxOutInfo, targetOutVal, minFee=0):
+ """
+ This method should usually be called with a small number added to target val
+ so that a tx can be constructed that has room for user to add some extra fee
+ if necessary.
+
+ However, we must also try calling it with the exact value, in case the user
+ is trying to spend exactly their remaining balance.
+ """
+ target = targetOutVal + minFee
+ bestMatchVal = 2**64
+ bestMatchUtxo = None
+ for utxo in unspentTxOutInfo:
+ if target <= utxo.getValue() < bestMatchVal:
+ bestMatchVal = utxo.getValue()
+ bestMatchUtxo = utxo
+
+ closeness = bestMatchVal - target
+ if 0 < closeness <= CENT:
+ # If we're going to have a change output, make sure it's above CENT
+ # to avoid a mandatory fee
+ try2Val = 2**64
+ try2Utxo = None
+ for utxo in unspentTxOutInfo:
+ if target+CENT < utxo.getValue() < try2Val:
+ try2Val = utxo.getValue()
+ try2Val = utxo
+ if not try2Utxo==None:
+ bestMatchUtxo = try2Utxo
+
+
+ if bestMatchUtxo==None:
+ return []
+ else:
+ return [bestMatchUtxo]
+
+################################################################################
+def PySelectCoins_MultiInput_SingleValue( \
+ unspentTxOutInfo, targetOutVal, minFee=0):
+ """
+ This method should usually be called with a small number added to target val
+ so that a tx can be constructed that has room for user to add some extra fee
+ if necessary.
+
+ However, we must also try calling it with the exact value, in case the user
+ is trying to spend exactly their remaining balance.
+ """
+ target = targetOutVal + minFee
+ outList = []
+ sumVal = 0
+ for utxo in unspentTxOutInfo:
+ sumVal += utxo.getValue()
+ outList.append(utxo)
+ if sumVal>=target:
+ break
+
+ return outList
+
+
+
+################################################################################
+def PySelectCoins_SingleInput_DoubleValue( \
+                                    unspentTxOutInfo, targetOutVal, minFee=0):
+   """
+   We will look for a single input that is within 30% of the target
+   In case the tx value is tiny rel to the fee: the minTarget calc
+   may fail to exceed the actual tx size needed, so we add an extra
+
+   We restrain the search to 25%. If there is no one output in this
+   range, then we will return nothing, and the SingleInput_SingleValue
+   method might return a usable result
+   """
+   # Aim for roughly 2x the payment so change ends up similar in size
+   # to the actual output (better output anonymity)
+   idealTarget = 2*targetOutVal + minFee
+
+   # check to make sure we're accumulating enough
+   minTarget = long(0.75 * idealTarget)
+   minTarget = max(minTarget, targetOutVal+minFee)
+   maxTarget = long(1.25 * idealTarget)
+
+   # Bail early if the whole wallet can't reach the minimum
+   if sum([u.getValue() for u in unspentTxOutInfo]) < minTarget:
+      return []
+
+   bestMatch = 2**64-1
+   bestUTXO = None
+   # Pick the in-range UTXO whose value is closest to the ideal target
+   for txout in unspentTxOutInfo:
+      if minTarget <= txout.getValue() <= maxTarget:
+         if abs(txout.getValue()-idealTarget) < bestMatch:
+            bestMatch = abs(txout.getValue()-idealTarget)
+            bestUTXO = txout
+
+   if bestUTXO==None:
+      return []
+   else:
+      return [bestUTXO]
+
+################################################################################
+def PySelectCoins_MultiInput_DoubleValue( \
+                                    unspentTxOutInfo, targetOutVal, minFee=0):
+   """
+   Accumulate UTXOs until the sum is close to twice the payment value,
+   so the change output is about the same size as the real output.
+   """
+   idealTarget = 2.0 * targetOutVal
+   minTarget = long(0.80 * idealTarget)
+   minTarget = max(minTarget, targetOutVal+minFee)
+   if sum([u.getValue() for u in unspentTxOutInfo]) < minTarget:
+      return []
+
+   outList = []
+   lastDiff = 2**64-1
+   sumVal = 0
+   for utxo in unspentTxOutInfo:
+      sumVal += utxo.getValue()
+      outList.append(utxo)
+      currDiff = abs(sumVal - idealTarget)
+      # should switch from decreasing to increasing when best match
+      # (once past minTarget, the first increase means the previous sum
+      # was the closest to ideal, so drop the UTXO that overshot)
+      if sumVal>=minTarget and currDiff>lastDiff:
+         del outList[-1]
+         break
+      lastDiff = currDiff
+
+   return outList
+
+
+
+
+################################################################################
+def getSelectCoinsScores(utxoSelectList, targetOutVal, minFee):
+   """
+   Define a metric for scoring the output of SelectCoints. The output of
+   this method is a tuple of scores which identify a few different factors
+   of a txOut selection that users might care about in a selectCoins algorithm.
+
+   This method only returns an absolute score, usually between 0 and 1 for
+   each factor. It is up to the person calling this method to decide how
+   much "weight" they want to give each one. You could even use the scores
+   as multiplicative factors if you wanted, though they were designed with
+   the following equation in mind: finalScore = sum(WEIGHT[i] * SCORE[i])
+
+   TODO: I need to recalibrate some of these factors, and modify them to
+   represent more directly what the user would be concerned about --
+   such as PayFeeFactor, AnonymityFactor, etc. The information is
+   indirectly available with the current set of factors here
+   """
+
+   # Need to calculate how much the change will be returned to sender on this tx
+   totalIn = sum([utxo.getValue() for utxo in utxoSelectList])
+   totalChange = totalIn - (targetOutVal+minFee)
+
+   # Abort if this is an empty list (negative score) or not enough coins
+   # NOTE(review): the next line was garbled during extraction (text
+   # between '<'...'>' stripped).  The missing span includes the
+   # insufficient-funds early return plus the computation of noZeroConf,
+   # numAddrFactor and outAnonFactor, all referenced below -- restore
+   # this hunk from the upstream Armory CoinSelection.py.
+   if len(utxoSelectList)==0 or totalIn 0)
+   #
+   # On the other hand, if we have 1.832 and 10.00, and the 10.000 is the
+   # change, we don't really care that they're not close, it's still
+   # damned good/deceptive output anonymity (so: only execute
+   # the following block if outAnonFactor <= 1)
+   if 0 < outAnonFactor <= 1 and not totalChange==0:
+      outValDiff = abs(totalChange - targetOutVal)
+      diffPct = (outValDiff / max(totalChange, targetOutVal))
+      if diffPct < 0.20:
+         outAnonFactor *= 1
+      elif diffPct < 0.50:
+         outAnonFactor *= 0.7
+      elif diffPct < 1.0:
+         outAnonFactor *= 0.3
+      else:
+         outAnonFactor = 0
+
+
+   ##################
+   # Tx size: we don't have signatures yet, but we assume that each txin is
+   # about 180 Bytes, TxOuts are 35, and 10 other bytes in the Tx
+   numBytes = 10
+   numBytes += 180 * len(utxoSelectList)
+   numBytes += 35 * (1 if totalChange==0 else 2)
+   txSizeFactor = 0
+   numKb = int(numBytes / 1000)
+   # Will compute size factor after we see this tx priority and AllowFree
+   # results. If the tx qualifies for free, we don't need to penalize
+   # a 3 kB transaction vs one that is 0.5 kB
+
+
+   ##################
+   # Priority: If our priority is above the 1-btc-after-1-day threshold
+   # then we might be allowed a free tx. But, if its priority
+   # isn't much above this thresh, it might take a couple blocks
+   # to be included
+   dPriority = 0
+   anyZeroConfirm = False
+   for utxo in utxoSelectList:
+      if utxo.getNumConfirm() == 0:
+         anyZeroConfirm = True
+      else:
+         dPriority += utxo.getValue() * utxo.getNumConfirm()
+
+   dPriority = dPriority / numBytes
+   priorityThresh = ONE_BTC * 144 / 250
+   if dPriority < priorityThresh:
+      priorityFactor = 0
+   elif dPriority < 10.0*priorityThresh:
+      priorityFactor = 0.7
+   elif dPriority < 100.0*priorityThresh:
+      priorityFactor = 0.9
+   else:
+      priorityFactor = 1.0
+
+
+   ##################
+   # AllowFree: If three conditions are met, then the tx can be sent safely
+   # without a tx fee. Granted, it may not be included in the
+   # current block if the free space is full, but definitely in
+   # the next one
+   isFreeAllowed = 0
+   # NOTE(review): the next line was garbled during extraction; from the
+   # stripping pattern it appears to have been
+   #    haveDustOutputs = (0<totalChange<CENT or targetOutVal<CENT)
+   #    if ((not haveDustOutputs) and \
+   #        dPriority >= priorityThresh and \
+   # -- confirm against upstream before applying.
+   haveDustOutputs = (0= priorityThresh and \
+      numBytes <= 10000):
+      isFreeAllowed = 1
+
+
+   ##################
+   # Finish size-factor calculation -- if free is allowed, kB is irrelevant
+   txSizeFactor = 0
+   if isFreeAllowed or numKb<1:
+      txSizeFactor = 1
+   else:
+      if numKb < 2:
+         txSizeFactor=0.2
+      elif numKb<3:
+         txSizeFactor=0.1
+      elif numKb<4:
+         txSizeFactor=0
+      else:
+         txSizeFactor=-1 #if this is huge, actually subtract score
+
+   return (isFreeAllowed, noZeroConf, priorityFactor, numAddrFactor, txSizeFactor, outAnonFactor)
+
+
+################################################################################
+# We define default preferences for weightings. Weightings are used to
+# determine the "priorities" for ranking various SelectCoins results
+# By setting the weights to different orders of magnitude, you are essentially
+# defining a sort-order: order by FactorA, then sub-order by FactorB...
+################################################################################
+# TODO: ADJUST WEIGHTING!
+# Indices into the score tuple returned by getSelectCoinsScores()
+IDX_ALLOWFREE = 0
+IDX_NOZEROCONF = 1
+IDX_PRIORITY = 2
+IDX_NUMADDR = 3
+IDX_TXSIZE = 4
+IDX_OUTANONYM = 5
+# Relative weights applied in PyEvalCoinSelect(); spread across orders of
+# magnitude so the big weights effectively define a sort order
+WEIGHTS = [None]*6
+WEIGHTS[IDX_ALLOWFREE] = 100000
+WEIGHTS[IDX_NOZEROCONF] = 1000000 # let's avoid zero-conf if possible
+WEIGHTS[IDX_PRIORITY] = 50
+WEIGHTS[IDX_NUMADDR] = 100000
+WEIGHTS[IDX_TXSIZE] = 100
+WEIGHTS[IDX_OUTANONYM] = 30
+
+
+################################################################################
+def PyEvalCoinSelect(utxoSelectList, targetOutVal, minFee, weights=WEIGHTS):
+   """
+   Use a specified set of weightings and sub-scores for a unspentTxOut list,
+   to assign an absolute "fitness" of this particular selection. The goal of
+   getSelectCoinsScores() is to produce weighting-agnostic subscores -- then
+   this method applies the weightings to these scores to get a final answer.
+
+   If list A has a higher score than list B, then it's a better selection for
+   that transaction. If you the two scores don't look right to you, then you
+   probably just need to adjust the weightings to your liking.
+
+   These weightings may become user-configurable in the future -- likely as an
+   option of coin-selection profiles -- such as "max anonymity", "min fee",
+   "balanced", etc).
+   """
+   scores = getSelectCoinsScores(utxoSelectList, targetOutVal, minFee)
+   # -1 signals an empty/insufficient selection
+   if scores==-1:
+      return -1
+
+   # Combine all the scores
+   theScore = 0
+   theScore += weights[IDX_NOZEROCONF] * scores[IDX_NOZEROCONF]
+   theScore += weights[IDX_PRIORITY] * scores[IDX_PRIORITY]
+   theScore += weights[IDX_NUMADDR] * scores[IDX_NUMADDR]
+   theScore += weights[IDX_TXSIZE] * scores[IDX_TXSIZE]
+   theScore += weights[IDX_OUTANONYM] * scores[IDX_OUTANONYM]
+
+   # If we're already paying a fee, why bother including this weight?
+   # NOTE(review): minFee elsewhere in this module is in satoshis, but
+   # 0.0005 looks like a BTC-denominated threshold -- as written this
+   # condition is only true when minFee==0.  Confirm intended units.
+   if minFee < 0.0005:
+      theScore += weights[IDX_ALLOWFREE] * scores[IDX_ALLOWFREE]
+
+   return theScore
+
+
+################################################################################
+@TimeThisFunction
+def PySelectCoins(unspentTxOutInfo, targetOutVal, minFee=0, numRand=10, margin=CENT):
+   """
+   Intense algorithm for coin selection: computes about 30 different ways to
+   select coins based on the desired target output and the min tx fee. Then
+   ranks the various solutions and picks the best one
+   """
+
+   # Nothing to do if the whole wallet can't cover the payment
+   if sum([u.getValue() for u in unspentTxOutInfo]) < targetOutVal:
+      return []
+
+   targExact = targetOutVal
+   targMargin = targetOutVal+margin
+
+   selectLists = []
+
+   # Start with the intelligent solutions with different sortings
+   for sortMethod in range(8):
+      diffSortList = PySortCoins(unspentTxOutInfo, sortMethod)
+      selectLists.append(PySelectCoins_SingleInput_SingleValue( diffSortList, targExact, minFee ))
+      selectLists.append(PySelectCoins_MultiInput_SingleValue( diffSortList, targExact, minFee ))
+      selectLists.append(PySelectCoins_SingleInput_SingleValue( diffSortList, targMargin, minFee ))
+      selectLists.append(PySelectCoins_MultiInput_SingleValue( diffSortList, targMargin, minFee ))
+      selectLists.append(PySelectCoins_SingleInput_DoubleValue( diffSortList, targExact, minFee ))
+      selectLists.append(PySelectCoins_MultiInput_DoubleValue( diffSortList, targExact, minFee ))
+      selectLists.append(PySelectCoins_SingleInput_DoubleValue( diffSortList, targMargin, minFee ))
+      selectLists.append(PySelectCoins_MultiInput_DoubleValue( diffSortList, targMargin, minFee ))
+
+   # Throw in a couple random solutions, maybe we get lucky
+   # But first, make a copy before in-place shuffling
+   # NOTE: using list[:] like below, really causes a swig::vector to freak out!
+   #utxos = unspentTxOutInfo[:]
+   #utxos = list(unspentTxOutInfo)
+   for method in range(8,10):
+      for i in range(numRand):
+         utxos = PySortCoins(unspentTxOutInfo, method)
+         selectLists.append(PySelectCoins_MultiInput_SingleValue(utxos, targExact, minFee))
+         selectLists.append(PySelectCoins_MultiInput_DoubleValue(utxos, targExact, minFee))
+         selectLists.append(PySelectCoins_MultiInput_SingleValue(utxos, targMargin, minFee))
+         selectLists.append(PySelectCoins_MultiInput_DoubleValue(utxos, targMargin, minFee))
+
+   # Now we define PyEvalCoinSelect as our sorting metric, and find the best solution
+   scoreFunc = lambda ulist: PyEvalCoinSelect(ulist, targetOutVal, minFee)
+   finalSelection = max(selectLists, key=scoreFunc)
+   SCORES = getSelectCoinsScores(finalSelection, targetOutVal, minFee)
+   if len(finalSelection)==0:
+      return []
+
+   # If we selected a list that has only one or two inputs, and we have
+   # other, tiny, unspent outputs from the same addresses, we should
+   # throw one or two of them in to help clear them out. However, we
+   # only do so if a plethora of conditions exist:
+   #
+   # First, we only consider doing this if the tx has <5 inputs already.
+   # Also, we skip this process if the current tx doesn't have excessive
+   # priority already -- we don't want to risk de-prioritizing a tx for
+   # this purpose.
+   #
+   # Next we sort by LOWEST value, because we really benefit from this most
+   # by clearing out tiny outputs. Along those lines, we don't even do
+   # unless it has low priority -- don't want to take a high-priority utxo
+   # and convert it to one that will be low-priority to start.
+   #
+   # Finally, we shouldn't do this if a high score was assigned to output
+   # anonymity: this extra output may cause a tx with good output anonymity
+   # to no longer possess this property
+   IDEAL_NUM_INPUTS = 5
+   if len(finalSelection) < IDEAL_NUM_INPUTS and \
+         SCORES[IDX_OUTANONYM] == 0:
+
+      # Helper lambdas: address of a UTXO, its priority, and a unique id
+      utxoToHash160 = lambda a: CheckHash160(a.getRecipientScrAddr())
+      getPriority = lambda a: a.getValue() * a.getNumConfirm()
+      getUtxoID = lambda a: a.getTxHash() + int_to_binary(a.getTxOutIndex())
+
+      alreadyUsedAddr = set( [utxoToHash160(utxo) for utxo in finalSelection] )
+      utxoSmallToLarge = sorted(unspentTxOutInfo, key=getPriority)
+      utxoSmToLgIDs = [getUtxoID(utxo) for utxo in utxoSmallToLarge]
+      finalSelectIDs = [getUtxoID(utxo) for utxo in finalSelection]
+
+      for other in utxoSmallToLarge:
+
+         # Skip it if it is already selected
+         if getUtxoID(other) in finalSelectIDs:
+            continue
+
+         # We only consider UTXOs that won't link any new addresses together
+         if not utxoToHash160(other) in alreadyUsedAddr:
+            continue
+
+         # Avoid zero-conf inputs altogether
+         if other.getNumConfirm() == 0:
+            continue
+
+         # Don't consider any inputs that are high priority already
+         if getPriority(other) > ONE_BTC*144:
+            continue
+
+         finalSelection.append(other)
+         if len(finalSelection)>=IDEAL_NUM_INPUTS:
+            break
+   return finalSelection
+
+
+################################################################################
+def calcMinSuggestedFees(selectCoinsResult, targetOutVal, preSelectedFee,
+ numRecipients):
+ """
+ Returns two fee options: one for relay, one for include-in-block.
+ In general, relay fees are required to get your block propagated
+ (since most nodes are Satoshi clients), but there's no guarantee
+ it will be included in a block -- though I'm sure there's plenty
+ of miners out there will include your tx for sub-standard fee.
+ However, it's virtually guaranteed that a miner will accept a fee
+ equal to the second return value from this method.
+
+ We have to supply the fee that was used in the selection algorithm,
+ so that we can figure out how much change there will be. Without
+ this information, we might accidentally declare a tx to be freeAllow
+ when it actually is not.
+ """
+
+ if len(selectCoinsResult)==0:
+ return [-1,-1]
+
+ paid = targetOutVal + preSelectedFee
+ change = sum([u.getValue() for u in selectCoinsResult]) - paid
+
+ # Calc approx tx size
+ numBytes = 10
+ numBytes += 180 * len(selectCoinsResult)
+ numBytes += 35 * (numRecipients + (1 if change>0 else 0))
+ numKb = int(numBytes / 1000)
+
+ if numKb>10:
+ return [(1+numKb)*MIN_RELAY_TX_FEE, (1+numKb)*MIN_TX_FEE]
+
+ # Compute raw priority of tx
+ prioritySum = 0
+ for utxo in selectCoinsResult:
+ prioritySum += utxo.getValue() * utxo.getNumConfirm()
+ prioritySum = prioritySum / numBytes
+
+ # Any tiny/dust outputs?
+ haveDustOutputs = (0= ONE_BTC * 144 / 250. and \
+ numBytes < 10000):
+ return [0,0]
+
+ # This cannot be a free transaction.
+ minFeeMultiplier = (1 + numKb)
+
+ # At the moment this condition never triggers
+ if minFeeMultiplier<1.0 and haveDustOutputs:
+ minFeeMultiplier = 1.0
+
+
+ return [minFeeMultiplier * MIN_RELAY_TX_FEE, \
+ minFeeMultiplier * MIN_TX_FEE]
+
+
+
+
diff --git a/armoryengine/Decorators.py b/armoryengine/Decorators.py
new file mode 100644
index 000000000..021bd84d8
--- /dev/null
+++ b/armoryengine/Decorators.py
@@ -0,0 +1,63 @@
+################################################################################
+#
+# Copyright (C) 2011-2014, Armory Technologies, Inc.
+# Distributed under the GNU Affero General Public License (AGPL v3)
+# See LICENSE or http://www.gnu.org/licenses/agpl.html
+#
+################################################################################
+#
+# Project: Armory
+# Author: Alan Reiner
+# Website: www.bitcoinarmory.com
+# Orig Date: 20 November, 2011
+#
+################################################################################
+from armoryengine.ArmoryUtils import LOGWARN, LOGERROR
+
+
+import smtplib
+import os
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import COMMASPACE, formatdate
+from email import Encoders
+import functools
+
def send_email(send_from, password, send_to, subject, text):
   """
   Send a plain-text email through Gmail's SMTP server (TLS, port 587).

   send_from - Gmail address used both as sender and as the login name
   password  - password for the send_from account
   send_to   - LIST of recipient address strings
   subject   - subject line
   text      - plain-text message body

   Raises AssertionError if send_to is not a list.
   """
   # Require a list so COMMASPACE.join below doesn't silently join the
   # individual characters of a single address string
   if not isinstance(send_to, list):
      raise AssertionError('send_to must be a list of addresses')
   msg = MIMEMultipart()
   msg['From'] = send_from
   msg['To'] = COMMASPACE.join(send_to)
   msg['Date'] = formatdate(localtime=True)
   msg['Subject'] = subject
   msg.attach(MIMEText(text))
   mailServer = smtplib.SMTP('smtp.gmail.com', 587)
   try:
      mailServer.ehlo()
      mailServer.starttls()
      mailServer.ehlo()
      mailServer.login(send_from, password)
      mailServer.sendmail(send_from, send_to, msg.as_string())
   finally:
      # Always close the SMTP connection, even if login/send fails
      mailServer.close()
+
+# Following this pattern to allow arguments to be passed to this decorator:
+# http://stackoverflow.com/questions/10176226/how-to-pass-extra-arguments-to-python-decorator
def EmailOutput(send_from, password, send_to, subject='Armory Output'):
   """
   Decorator factory: wrap a function so that any truthy return value is
   also emailed (via send_email) to the given recipients.

   The email is only sent when the wrapped call returns something truthy
   AND all of send_from/password/send_to are provided.
   """
   def decorate(func):
      @functools.wraps(func)
      def emailingWrapper(*args, **kwargs):
         result = func(*args, **kwargs)
         # Only email when there is output to report and full credentials
         if result and send_from and password and send_to:
            send_email(send_from, password, send_to, subject, result)
         return result
      return emailingWrapper
   return decorate
+
+
+
+
+
+
+
+
diff --git a/armoryengine/Networking.py b/armoryengine/Networking.py
new file mode 100644
index 000000000..2ebe4baab
--- /dev/null
+++ b/armoryengine/Networking.py
@@ -0,0 +1,1075 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+################################################################################
+#
+# Armory Networking:
+#
+# This is where I will define all the network operations needed for
+# Armory to operate, using python-twisted. There are "better"
+# ways to do this with "reusable" code structures (i.e. using huge
+# deferred callback chains), but this is not the central "creative"
+# part of the Bitcoin protocol. I need just enough to broadcast tx
+# and receive new tx that aren't in the blockchain yet. Beyond that,
+# I'll just be ignoring everything else.
+#
+################################################################################
+
+import os.path
+import random
+
+from twisted.internet.defer import Deferred
+from twisted.internet.protocol import Protocol, ReconnectingClientFactory
+
+from armoryengine.ArmoryUtils import LOGINFO, RightNow, getVersionString, \
+ BTCARMORY_VERSION, NetworkIDError, LOGERROR, BLOCKCHAINS, CLI_OPTIONS, LOGDEBUG, \
+ binary_to_hex, BIGENDIAN, LOGRAWDATA, ARMORY_HOME_DIR, ConnectionError, \
+ MAGIC_BYTES, hash256, verifyChecksum, NETWORKENDIAN, int_to_bitset, \
+ bitset_to_int, unixTimeToFormatStr
+from armoryengine.BDM import TheBDM
+from armoryengine.BinaryPacker import BinaryPacker, BINARY_CHUNK, UINT32, UINT64, \
+ UINT16, VAR_INT, INT32, INT64, VAR_STR
+from armoryengine.BinaryUnpacker import BinaryUnpacker, UnpackerError
+from armoryengine.Block import PyBlockHeader
+from armoryengine.Transaction import PyTx, indent
+
+
class ArmoryClient(Protocol):
   """
   This is where all the Bitcoin-specific networking stuff goes.
   In the Twisted way, you need to inject your own chains of
   callbacks through the factory in order to get this class to do
   the right thing on the various events.
   """

   ############################################################
   def __init__(self):
      # Raw bytes received but not yet assembled into a complete message
      self.recvData = ''
      # Handshake state: both flags must be True before normal processing
      self.gotVerack = False
      self.sentVerack = False
      self.sentHeadersReq = True
      # [addrQuad, port] of the remote peer, set in connectionMade
      self.peer = []

   ############################################################
   def connectionMade(self):
      """
      Construct the initial version message and send it right away.
      Everything else will be handled by dataReceived.
      """
      LOGINFO('Connection initiated. Start handshake')
      addrTo = str_to_quad(self.transport.getPeer().host)
      portTo = self.transport.getPeer().port
      addrFrom = str_to_quad(self.transport.getHost().host)
      portFrom = self.transport.getHost().port

      self.peer = [addrTo, portTo]

      # We advertise no services (NODE_NETWORK bit unset)
      services = '0'*16
      msgVersion = PayloadVersion()
      msgVersion.version = 40000 # TODO: this is what my Satoshi client says
      msgVersion.services = services
      msgVersion.time = long(RightNow())
      msgVersion.addrRecv = PyNetAddress(0, services, addrTo, portTo )
      msgVersion.addrFrom = PyNetAddress(0, services, addrFrom, portFrom)
      # Random nonce lets a node detect a connection to itself
      msgVersion.nonce = random.randint(2**60, 2**64-1)
      msgVersion.subver = 'Armory:%s' % getVersionString(BTCARMORY_VERSION)
      msgVersion.height0 = -1
      self.sendMessage( msgVersion )
      self.factory.func_madeConnect()


   ############################################################
   def dataReceived(self, data):
      """
      Called by the reactor when data is received over the connection.
      This method will do nothing if we don't receive a full message.
      """


      #print '\n\nData Received:',
      #pprintHex(binary_to_hex(data), withAddr=False)

      # Put the current buffer into an unpacker, process until empty
      self.recvData += data
      buf = BinaryUnpacker(self.recvData)

      messages = []
      while True:
         try:
            # recvData is only modified if the unserialize succeeds
            # Had a serious issue with references, so I had to convert
            # messages to strings to guarantee that copies were being
            # made! (yes, hacky...)
            thisMsg = PyMessage().unserialize(buf)
            messages.append( thisMsg.serialize() )
            self.recvData = buf.getRemainingString()
         except NetworkIDError:
            LOGERROR('Message for a different network!' )
            if BLOCKCHAINS.has_key(self.recvData[:4]):
               LOGERROR( '(for network: %s)', BLOCKCHAINS[self.recvData[:4]])
            # Before raising the error, we should've finished reading the msg
            # So pop it off the front of the buffer
            self.recvData = buf.getRemainingString()
            return
         except UnpackerError:
            # Expect this error when buffer isn't full enough for a whole msg
            break

      # We might've gotten here without anything to process -- if so, bail
      if len(messages)==0:
         return


      # Finally, we have some message to process, let's do it
      for msgStr in messages:
         msg = PyMessage().unserialize(msgStr)
         cmd = msg.cmd

         # Log the message if netlog option
         if CLI_OPTIONS.netlog:
            LOGDEBUG( 'DataReceived: %s', msg.payload.command)
            if msg.payload.command == 'tx':
               LOGDEBUG('\t' + binary_to_hex(msg.payload.tx.thisHash))
            elif msg.payload.command == 'block':
               LOGDEBUG('\t' + msg.payload.header.getHashHex())
            elif msg.payload.command == 'inv':
               for inv in msg.payload.invList:
                  LOGDEBUG(('\tBLOCK: ' if inv[0]==2 else '\tTX : ') + \
                                      binary_to_hex(inv[1]))


         # We process version and verack only if we haven't yet
         if cmd=='version' and not self.sentVerack:
            self.peerInfo = {}
            self.peerInfo['version'] = msg.payload.version
            self.peerInfo['subver'] = msg.payload.subver
            self.peerInfo['time'] = msg.payload.time
            self.peerInfo['height'] = msg.payload.height0
            LOGINFO('Received version message from peer:')
            LOGINFO(' Version: %s', str(self.peerInfo['version']))
            LOGINFO(' SubVersion: %s', str(self.peerInfo['subver']))
            LOGINFO(' TimeStamp: %s', str(self.peerInfo['time']))
            LOGINFO(' StartHeight: %s', str(self.peerInfo['height']))
            self.sentVerack = True
            self.sendMessage( PayloadVerack() )
         elif cmd=='verack':
            self.gotVerack = True
            self.factory.handshakeFinished(self)
            #self.startHeaderDL()

         ####################################################################
         # Don't process any other messages unless the handshake is finished
         if self.gotVerack and self.sentVerack:
            self.processMessage(msg)


   ############################################################
   #def connectionLost(self, reason):
      #"""
      #Try to reopen connection (not impl yet)
      #"""
      #self.factory.connectionFailed(self, reason)


   ############################################################
   def processMessage(self, msg):
      # TODO: when I start expanding this class to be more versatile,
      # I'll consider chaining/setting callbacks from the calling
      # application. For now, it's pretty static.
      #msg.payload.pprint(nIndent=2)
      if msg.cmd=='inv':
         invobj = msg.payload
         getdataMsg = PyMessage('getdata')
         # Request only objects we don't already have; request nothing at
         # all while the blockchain engine is mid-scan
         for inv in invobj.invList:
            if inv[0]==MSG_INV_BLOCK:
               if self.factory.bdm and (self.factory.bdm.getBDMState()=='Scanning' or \
                  self.factory.bdm.hasHeaderWithHash(inv[1])):
                  continue
               getdataMsg.payload.invList.append(inv)
            if inv[0]==MSG_INV_TX:
               if self.factory.bdm and (self.factory.bdm.getBDMState()=='Scanning' or \
                  self.factory.bdm.hasTxWithHash(inv[1])):
                  continue
               getdataMsg.payload.invList.append(inv)

         # Now send the full request
         if self.factory.bdm and not self.factory.bdm.getBDMState()=='Scanning':
            self.sendMessage(getdataMsg)

      # Dispatch to the factory's callbacks for each payload type
      if msg.cmd=='tx':
         pytx = msg.payload.tx
         self.factory.func_newTx(pytx)
      elif msg.cmd=='inv':
         invList = msg.payload.invList
         self.factory.func_inv(invList)
      elif msg.cmd=='block':
         pyHeader = msg.payload.header
         pyTxList = msg.payload.txList
         LOGINFO('Received new block. %s', binary_to_hex(pyHeader.getHash(), BIGENDIAN))
         self.factory.func_newBlock(pyHeader, pyTxList)



   ############################################################
   def startHeaderDL(self):
      # Build (but do not send) a 'getheaders' request from a block locator.
      # NOTE(review): self.topBlk is never initialized in this class, and
      # createBlockLocatorNumList is a module-level function, not a method,
      # so this call would raise AttributeError -- looks unfinished; the
      # constructed msg is also never sent. Confirm before relying on it.
      numList = self.createBlockLocatorNumList(self.topBlk)
      msg = PyMessage('getheaders')
      msg.payload.version = 1
      if self.factory.bdm:
         msg.payload.hashList = [self.factory.bdm.getHeaderByHeight(i).getHash() for i in numList]
      else:
         msg.payload.hashList = []
      msg.payload.hashStop = '\x00'*32

      self.sentHeadersReq = True



   ############################################################
   def startBlockDL(self):
      # Build (but do not send) a 'getblocks' request.
      # NOTE(review): same issues as startHeaderDL -- self.topBlk and
      # self.createBlockLocatorNumList do not exist on this class, and the
      # message is never transmitted.
      numList = self.createBlockLocatorNumList(self.topBlk)
      msg = PyMessage('getblocks')
      msg.payload.version = 1
      if self.factory.bdm:
         msg.payload.hashList = [self.factory.bdm.getHeaderByHeight(i).getHash() for i in numList]
      else:
         msg.payload.hashList = []
      msg.payload.hashStop = '\x00'*32


   ############################################################
   def sendMessage(self, msg):
      """
      Must pass in a PyMessage, or one of the Payload types, which
      will be converted to a PyMessage -- and then sent to the peer.
      If you have a fully-serialized message (with header) already,
      easy enough to user PyMessage().unserialize(binMsg)
      """

      if isinstance(msg, PyMessage):
         #print '\n\nSending Message:', msg.payload.command.upper()
         #pprintHex(binary_to_hex(msg.serialize()), indent=' ')
         if CLI_OPTIONS.netlog:
            LOGDEBUG( 'SendMessage: %s', msg.payload.command)
            LOGRAWDATA( msg.serialize() )
         self.transport.write(msg.serialize())
      else:
         # A bare payload object: wrap it in a PyMessage first
         msg = PyMessage(payload=msg)
         #print '\n\nSending Message:', msg.payload.command.upper()
         #pprintHex(binary_to_hex(msg.serialize()), indent=' ')
         if CLI_OPTIONS.netlog:
            LOGDEBUG( 'SendMessage: %s', msg.payload.command)
            LOGRAWDATA( msg.serialize() )
         self.transport.write(msg.serialize())


   ############################################################
   def sendTx(self, txObj):
      """
      This is a convenience method for the special case of sending
      a locally-constructed transaction. Pass in either a PyTx
      object, or a binary serialized tx. It will be converted to
      a PyMessage and forwarded to our peer(s)
      """
      LOGINFO('sendTx called...')
      if isinstance(txObj, PyMessage):
         self.sendMessage( txObj )
      elif isinstance(txObj, PyTx):
         self.sendMessage( PayloadTx(txObj))
      elif isinstance(txObj, str):
         # Raw serialized transaction bytes
         self.sendMessage( PayloadTx(PyTx().unserialize(txObj)) )
+
+
+
+
+
+
+
+
################################################################################
################################################################################
class ArmoryClientFactory(ReconnectingClientFactory):
   """
   Spawns Protocol objects used for communicating over the socket. All such
   objects (ArmoryClients) can share information through this factory.
   However, at the moment, this class is designed to only create a single
   connection -- to localhost.
   """
   protocol = ArmoryClient
   lastAlert = 0

   #############################################################################
   def __init__(self, \
                bdm,
                def_handshake=None, \
                func_loseConnect=(lambda: None), \
                func_madeConnect=(lambda: None), \
                func_newTx=(lambda x: None), \
                func_newBlock=(lambda x,y: None), \
                func_inv=(lambda x: None)):
      """
      Initialize the ReconnectingClientFactory with a deferred for when the handshake
      finishes: there should be only one handshake, and thus one firing
      of the handshake-finished callback
      """
      # Blockchain-data-manager handle; may be None/offline
      self.bdm = bdm
      self.lastAlert = 0
      # Wrap a plain callback into a Deferred if necessary
      self.deferred_handshake = forceDeferred(def_handshake)
      self.fileMemPool = os.path.join(ARMORY_HOME_DIR, 'mempool.bin')

      # All other methods will be regular callbacks: we plan to have a very
      # static set of behaviors for each message type
      # (NOTE: The logic for what I need right now is so simple, that
      # I finished implementing it in a few lines of code. When I
      # need to expand the versatility of this class, I'll start
      # doing more OOP/deferreds/etc
      self.func_loseConnect = func_loseConnect
      self.func_madeConnect = func_madeConnect
      self.func_newTx = func_newTx
      self.func_newBlock = func_newBlock
      self.func_inv = func_inv
      # Currently-connected protocol instance (None until handshake done)
      self.proto = None



   #############################################################################
   def addTxToMemoryPool(self, pytx):
      # Forward a zero-confirmation tx to the blockchain engine (no-op offline)
      if self.bdm and not self.bdm.getBDMState()=='Offline':
         self.bdm.addNewZeroConfTx(pytx.serialize(), long(RightNow()), True)



   #############################################################################
   def handshakeFinished(self, protoObj):
      # Called by the protocol once version/verack completed; fires the
      # handshake Deferred exactly once (it is cleared before the callback)
      LOGINFO('Handshake finished, connection open!')
      self.proto = protoObj
      if self.deferred_handshake:
         d, self.deferred_handshake = self.deferred_handshake, None
         d.callback(protoObj)


   #############################################################################
   def clientConnectionLost(self, connector, reason):
      # Notify the app, then let the base class schedule a reconnect
      LOGERROR('***Connection to Satoshi client LOST! Attempting to reconnect...')
      self.func_loseConnect()
      ReconnectingClientFactory.clientConnectionLost(self,connector,reason)


   #############################################################################
   def connectionFailed(self, protoObj, reason):
      # NOTE(review): twisted's ReconnectingClientFactory defines
      # clientConnectionFailed, not connectionFailed -- confirm this
      # override is actually invoked by the reactor
      LOGERROR('***Initial connection to Satoshi client failed! Retrying...')
      ReconnectingClientFactory.connectionFailed(self, protoObj, reason)


   #############################################################################
   def sendTx(self, pytxObj):
      # Forward a tx to the connected peer; error if not connected yet
      if self.proto:
         self.proto.sendTx(pytxObj)
      else:
         raise ConnectionError, 'Connection to localhost DNE.'


   #############################################################################
   def sendMessage(self, msgObj):
      # Forward an arbitrary message/payload to the connected peer
      if self.proto:
         self.proto.sendMessage(msgObj)
      else:
         raise ConnectionError, 'Connection to localhost DNE.'
+
+
+
+###############################################################################
+###############################################################################
+#
+# Networking Objects
+#
+###############################################################################
+###############################################################################
+
def quad_to_str(addrQuad):
   """ Render a four-int IP quad as dotted-decimal text. """
   return '.'.join(map(str, addrQuad))
+
def quad_to_binary(addrQuad):
   """ Pack a four-int IP quad into a 4-byte string. """
   return ''.join(map(chr, addrQuad))
+
def binary_to_quad(addrBin):
   """ Unpack a 4-byte string into a list of four ints. """
   return list(map(ord, addrBin))
+
def str_to_quad(addrBin):
   """ Parse dotted-decimal text into a list of four ints. """
   return list(map(int, addrBin.split('.')))
+
def str_to_binary(addrBin):
   """ I should come up with a better name for this -- it's net-addr only:
       dotted-decimal text -> 4-byte packed string. """
   return ''.join(chr(int(octet)) for octet in addrBin.split('.'))
+
def parseNetAddress(addrObj):
   """
   Normalize a network address to a four-int quad list.  Accepts a 4-byte
   packed string, a dotted-decimal string, or an already-parsed quad
   (returned untouched).
   """
   if not isinstance(addrObj, str):
      # Probably already in the right form -- pass through unchanged
      return addrObj
   if len(addrObj)==4:
      return binary_to_quad(addrObj)
   return str_to_quad(addrObj)
+
+
+
# Inventory-vector type codes used in 'inv'/'getdata' messages (Bitcoin p2p)
MSG_INV_ERROR = 0
MSG_INV_TX = 1
MSG_INV_BLOCK = 2
+
+
################################################################################
class PyMessage(object):
   """
   All payload objects have a serialize and unserialize method, making them
   easy to attach to PyMessage objects
   """
   def __init__(self, cmd='', payload=None):
      """
      Can create a message by the command name, or the payload (or neither)
      """
      self.magic = MAGIC_BYTES
      self.cmd = cmd
      self.payload = payload

      # A payload object knows its own command string; otherwise construct
      # an empty payload of the requested type from the PayloadMap registry
      if payload:
         self.cmd = payload.command
      elif cmd:
         self.payload = PayloadMap[self.cmd]()



   def serialize(self):
      # Wire format: magic(4) | command(12, NUL-padded) | payload length(4) |
      # checksum(4; first 4 bytes of hash256(payload)) | payload bytes
      bp = BinaryPacker()
      bp.put(BINARY_CHUNK, self.magic, width= 4)
      bp.put(BINARY_CHUNK, self.cmd.ljust(12, '\x00'), width=12)
      payloadBin = self.payload.serialize()
      bp.put(UINT32, len(payloadBin))
      bp.put(BINARY_CHUNK, hash256(payloadBin)[:4], width= 4)
      bp.put(BINARY_CHUNK, payloadBin)
      return bp.getBinaryString()

   def unserialize(self, toUnpack):
      # Accept either a raw string or an existing BinaryUnpacker
      if isinstance(toUnpack, BinaryUnpacker):
         msgData = toUnpack
      else:
         msgData = BinaryUnpacker( toUnpack )


      self.magic = msgData.get(BINARY_CHUNK, 4)
      self.cmd = msgData.get(BINARY_CHUNK, 12).strip('\x00')
      length = msgData.get(UINT32)
      chksum = msgData.get(BINARY_CHUNK, 4)
      payload = msgData.get(BINARY_CHUNK, length)
      payload = verifyChecksum(payload, chksum)

      self.payload = PayloadMap[self.cmd]().unserialize(payload)

      # The magic check comes LAST on purpose: ArmoryClient.dataReceived
      # relies on the whole message having been consumed from the unpacker
      # before NetworkIDError is raised
      if self.magic != MAGIC_BYTES:
         raise NetworkIDError, 'Message has wrong network bytes!'
      return self


   def pprint(self, nIndent=0):
      # Human-readable dump of the header plus the payload's own pprint
      indstr = indent*nIndent
      print ''
      print indstr + 'Bitcoin-Network-Message -- ' + self.cmd.upper()
      print indstr + indent + 'Magic: ' + binary_to_hex(self.magic)
      print indstr + indent + 'Command: ' + self.cmd
      print indstr + indent + 'Payload: ' + str(len(self.payload.serialize())) + ' bytes'
      self.payload.pprint(nIndent+1)
+
+
+################################################################################
+class PyNetAddress(object):
+
+ def __init__(self, time=-1, svcs='0'*16, netaddrObj=[], port=-1):
+ """
+ For our client we will ALWAYS use svcs=0 (NODE_NETWORK=0)
+
+ time is stored as a unix timestamp
+ services is stored as a bitset -- a string of 16 '0's or '1's
+ addrObj is stored as a list/tuple of four UINT8s
+ port is a regular old port number...
+ """
+ self.time = time
+ self.services = svcs
+ self.addrQuad = parseNetAddress(netaddrObj)
+ self.port = port
+
+ def unserialize(self, toUnpack, hasTimeField=True):
+ if isinstance(toUnpack, BinaryUnpacker):
+ addrData = toUnpack
+ else:
+ addrData = BinaryUnpacker( toUnpack )
+
+ if hasTimeField:
+ self.time = addrData.get(UINT32)
+
+ self.services = addrData.get(UINT64)
+ self.addrQuad = addrData.get(BINARY_CHUNK,16)[-4:]
+ self.port = addrData.get(UINT16, endianness=NETWORKENDIAN)
+
+ self.services = int_to_bitset(self.services)
+ self.addrQuad = binary_to_quad(self.addrQuad)
+ return self
+
+ def serialize(self, withTimeField=True):
+ bp = BinaryPacker()
+ if withTimeField:
+ bp.put(UINT32, self.time)
+ bp.put(UINT64, bitset_to_int(self.services))
+ bp.put(BINARY_CHUNK, quad_to_binary(self.addrQuad).rjust(16,'\x00'))
+ bp.put(UINT16, self.port, endianness=NETWORKENDIAN)
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Network-Address:',
+ print indstr + indent + 'Time: ' + unixTimeToFormatStr(self.time)
+ print indstr + indent + 'Svcs: ' + self.services
+ print indstr + indent + 'IPv4: ' + quad_to_str(self.addrQuad)
+ print indstr + indent + 'Port: ' + self.port
+
+ def pprintShort(self):
+ print quad_to_str(self.addrQuad) + ':' + str(self.port)
+
+################################################################################
+################################################################################
+class PayloadAddr(object):
+
+ command = 'addr'
+
+ def __init__(self, addrList=[]):
+ self.addrList = addrList # PyNetAddress objs
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ addrData = toUnpack
+ else:
+ addrData = BinaryUnpacker( toUnpack )
+
+ self.addrList = []
+ naddr = addrData.get(VAR_INT)
+ for i in range(naddr):
+ self.addrList.append( PyNetAddress().unserialize(addrData) )
+ return self
+
+ def serialize(self):
+ bp = BinaryPacker()
+ bp.put(VAR_INT, len(self.addrList))
+ for netaddr in self.addrList:
+ bp.put(BINARY_CHUNK, netaddr.serialize(), width=30)
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(addr):',
+ for a in self.addrList:
+ a.pprintShort()
+
+ def pprintShort(self):
+ for a in self.addrList:
+ print '[' + quad_to_str(a.pprintShort()) + '], '
+
+################################################################################
+################################################################################
+class PayloadPing(object):
+ """
+ All payload objects have a serialize and unserialize method, making them
+ easy to attach to PyMessage objects
+ """
+ command = 'ping'
+
+ def __init__(self):
+ pass
+
+ def unserialize(self, toUnpack):
+ return self
+
+ def serialize(self):
+ return ''
+
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(ping)'
+
+
################################################################################
################################################################################
class PayloadVersion(object):
   """
   The 'version' handshake message: protocol version, service bits,
   timestamp, both endpoints' addresses (without time fields), a random
   nonce, the user-agent string, and the sender's best block height.
   """

   command = 'version'

   def __init__(self, version=0, svcs='0'*16, tstamp=-1, addrRcv=PyNetAddress(), \
                      addrFrm=PyNetAddress(), nonce=-1, sub=-1, height=-1):
      # NOTE(review): the PyNetAddress() defaults are evaluated once and
      # shared by every default-constructed PayloadVersion -- confirm no
      # caller mutates them in place
      self.version = version
      self.services = svcs
      self.time = tstamp
      self.addrRecv = addrRcv
      self.addrFrom = addrFrm
      self.nonce = nonce
      self.subver = sub
      self.height0 = height

   def unserialize(self, toUnpack):
      # Accept either a raw string or an existing BinaryUnpacker
      if isinstance(toUnpack, BinaryUnpacker):
         verData = toUnpack
      else:
         verData = BinaryUnpacker( toUnpack )

      self.version = verData.get(INT32)
      self.services = int_to_bitset(verData.get(UINT64), widthBytes=8)
      self.time = verData.get(INT64)
      # The two embedded addresses carry no timestamp inside 'version'
      self.addrRecv = PyNetAddress().unserialize(verData, hasTimeField=False)
      self.addrFrom = PyNetAddress().unserialize(verData, hasTimeField=False)
      self.nonce = verData.get(UINT64)
      self.subver = verData.get(VAR_STR)
      self.height0 = verData.get(INT32)
      return self

   def serialize(self):
      bp = BinaryPacker()
      bp.put(INT32, self.version )
      bp.put(UINT64, bitset_to_int(self.services))
      bp.put(INT64, self.time ) # todo, should this really be int64?
      bp.put(BINARY_CHUNK, self.addrRecv.serialize(withTimeField=False))
      bp.put(BINARY_CHUNK, self.addrFrom.serialize(withTimeField=False))
      bp.put(UINT64, self.nonce )
      bp.put(VAR_STR, self.subver )
      bp.put(INT32, self.height0 )
      return bp.getBinaryString()

   def pprint(self, nIndent=0):
      # Human-readable dump of all handshake fields
      indstr = indent*nIndent
      print ''
      print indstr + 'Message(version):'
      print indstr + indent + 'Version: ' + str(self.version)
      print indstr + indent + 'Services: ' + self.services
      print indstr + indent + 'Time: ' + unixTimeToFormatStr(self.time)
      print indstr + indent + 'AddrTo: ',; self.addrRecv.pprintShort()
      print indstr + indent + 'AddrFrom:',; self.addrFrom.pprintShort()
      print indstr + indent + 'Nonce: ' + str(self.nonce)
      print indstr + indent + 'SubVer: ', self.subver
      print indstr + indent + 'StartHgt: ' + str(self.height0)
+
+################################################################################
+class PayloadVerack(object):
+ """
+ All payload objects have a serialize and unserialize method, making them
+ easy to attach to PyMessage objects
+ """
+
+ command = 'verack'
+
+ def __init__(self):
+ pass
+
+ def unserialize(self, toUnpack):
+ return self
+
+ def serialize(self):
+ return ''
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(verack)'
+
+
+
################################################################################
################################################################################
class PayloadInv(object):
   """
   All payload objects have a serialize and unserialize method, making them
   easy to attach to PyMessage objects
   """

   command = 'inv'

   def __init__(self):
      self.invList = []  # list of (type, hash) pairs

   def unserialize(self, toUnpack):
      if isinstance(toUnpack, BinaryUnpacker):
         invData = toUnpack
      else:
         invData = BinaryUnpacker( toUnpack )

      # NOTE(review): entries are appended without clearing self.invList,
      # so unserializing twice on one instance accumulates items; harmless
      # for the fresh-instance usage in PyMessage, but worth confirming
      numInv = invData.get(VAR_INT)
      for i in range(numInv):
         invType = invData.get(UINT32)
         invHash = invData.get(BINARY_CHUNK, 32)
         self.invList.append( [invType, invHash] )
      return self

   def serialize(self):
      # var_int count followed by (uint32 type, 32-byte hash) pairs
      bp = BinaryPacker()
      bp.put(VAR_INT, len(self.invList))
      for inv in self.invList:
         bp.put(UINT32, inv[0])
         bp.put(BINARY_CHUNK, inv[1], width=32)
      return bp.getBinaryString()


   def pprint(self, nIndent=0):
      # One line per inventory vector, labeled by type
      indstr = indent*nIndent
      print ''
      print indstr + 'Message(inv):'
      for inv in self.invList:
         print indstr + indent + ('BLOCK: ' if inv[0]==2 else 'TX : ') + \
                                  binary_to_hex(inv[1])
+
+
+
+################################################################################
+################################################################################
+class PayloadGetData(object):
+ """
+ All payload objects have a serialize and unserialize method, making them
+ easy to attach to PyMessage objects
+ """
+
+ command = 'getdata'
+
+ def __init__(self, invList=[]):
+ if invList:
+ self.invList = invList
+ else:
+ self.invList = []
+
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ invData = toUnpack
+ else:
+ invData = BinaryUnpacker( toUnpack )
+
+ numInv = invData.get(VAR_INT)
+ for i in range(numInv):
+ invType = invData.get(UINT32)
+ invHash = invData.get(BINARY_CHUNK, 32)
+ self.invList.append( [invType, invHash] )
+ return self
+
+ def serialize(self):
+ bp = BinaryPacker()
+ bp.put(VAR_INT, len(self.invList))
+ for inv in self.invList:
+ bp.put(UINT32, inv[0])
+ bp.put(BINARY_CHUNK, inv[1], width=32)
+ return bp.getBinaryString()
+
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(getdata):'
+ for inv in self.invList:
+ print indstr + indent + ('BLOCK: ' if inv[0]==2 else 'TX : ') + \
+ binary_to_hex(inv[1])
+
+
+################################################################################
+################################################################################
+class PayloadGetHeaders(object):
+ command = 'getheaders'
+
+ def __init__(self, hashStartList=[], hashStop=''):
+ self.version = 1
+ self.hashList = hashStartList
+ self.hashStop = hashStop
+
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ ghData = toUnpack
+ else:
+ ghData = BinaryUnpacker( toUnpack )
+
+ self.version = ghData.get(UINT32)
+ nhash = ghData.get(VAR_INT)
+ for i in range(nhash):
+ self.hashList.append(ghData.get(BINARY_CHUNK, 32))
+ self.hashStop = ghData.get(BINARY_CHUNK, 32)
+ return self
+
+ def serialize(self):
+ nhash = len(self.hashList)
+ bp = BinaryPacker()
+ bp.put(UINT32, self.version)
+ bp.put(VAR_INT, nhash)
+ for i in range(nhash):
+ bp.put(BINARY_CHUNK, self.hashList[i], width=32)
+ bp.put(BINARY_CHUNK, self.hashStop, width=32)
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(getheaders):'
+ print indstr + indent + 'HashList(s) :' + binary_to_hex(self.hashList[0])
+ for i in range(1,len(self.hashList)):
+ print indstr + indent + ' :' + binary_to_hex(self.hashList[i])
+ print indstr + indent + 'HashStop :' + binary_to_hex(self.hashStop)
+
+
+
+################################################################################
+################################################################################
+class PayloadGetBlocks(object):
+ command = 'getblocks'
+
+ def __init__(self, version=1, startCt=-1, hashStartList=[], hashStop=''):
+ self.version = 1
+ self.hashList = hashStartList
+ self.hashStop = hashStop
+
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ gbData = toUnpack
+ else:
+ gbData = BinaryUnpacker( toUnpack )
+
+ self.version = gbData.get(UINT32)
+ nhash = gbData.get(VAR_INT)
+ for i in range(nhash):
+ self.hashList.append(gbData.get(BINARY_CHUNK, 32))
+ self.hashStop = gbData.get(BINARY_CHUNK, 32)
+ return self
+
+ def serialize(self):
+ nhash = len(self.hashList)
+ bp = BinaryPacker()
+ bp.put(UINT32, self.version)
+ bp.put(VAR_INT, nhash)
+ for i in range(nhash):
+ bp.put(BINARY_CHUNK, self.hashList[i], width=32)
+ bp.put(BINARY_CHUNK, self.hashList, width=32)
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(getheaders):'
+ print indstr + indent + 'Version :' + str(self.version)
+ print indstr + indent + 'HashList(s) :' + binary_to_hex(self.hashList[0])
+ for i in range(1,len(self.hashList)):
+ print indstr + indent + ' :' + binary_to_hex(self.hashList[i])
+ print indstr + indent + 'HashStop :' + binary_to_hex(self.hashStop)
+
+
+################################################################################
+################################################################################
+class PayloadTx(object):
+ command = 'tx'
+
+ def __init__(self, tx=PyTx()):
+ self.tx = tx
+
+ def unserialize(self, toUnpack):
+ self.tx.unserialize(toUnpack)
+ return self
+
+ def serialize(self):
+ return self.tx.serialize()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(tx):'
+ self.tx.pprint(nIndent+1)
+
+
+################################################################################
+################################################################################
+class PayloadHeaders(object):
+ command = 'headers'
+
+ def __init__(self, header=PyBlockHeader(), headerlist=[]):
+ self.header = header
+ self.headerList = headerlist
+
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ headerData = toUnpack
+ else:
+ headerData = BinaryUnpacker( toUnpack )
+
+ self.headerList = []
+ self.header.unserialize(headerData)
+ numHeader = headerData.get(VAR_INT)
+ for i in range(numHeader):
+ self.headerList.append(PyBlockHeader().unserialize(headerData))
+ headerData.get(VAR_INT) # Not sure if this is even used, ever
+ return self
+
+ def serialize(self):
+ bp = BinaryPacker()
+ bp.put(BINARY_CHUNK, self.header.serialize())
+ bp.put(VAR_INT, len(self.headerList))
+ for header in self.headerList:
+ bp.put(BINARY_CHUNK, header.serialize())
+ bp.put(VAR_INT, 0)
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(headers):'
+ self.header.pprint(nIndent+1)
+ for header in self.headerList:
+ print indstr + indent + 'Header:', header.getHash()
+
+
+################################################################################
+################################################################################
+class PayloadBlock(object):
+ command = 'block'
+
+ def __init__(self, header=PyBlockHeader(), txlist=[]):
+ self.header = header
+ self.txList = txlist
+
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ blkData = toUnpack
+ else:
+ blkData = BinaryUnpacker( toUnpack )
+
+ self.txList = []
+ self.header.unserialize(blkData)
+ numTx = blkData.get(VAR_INT)
+ for i in range(numTx):
+ self.txList.append(PyTx().unserialize(blkData))
+ return self
+
+ def serialize(self):
+ bp = BinaryPacker()
+ bp.put(BINARY_CHUNK, self.header.serialize())
+ bp.put(VAR_INT, len(self.txList))
+ for tx in self.txList:
+ bp.put(BINARY_CHUNK, tx.serialize())
+ return bp.getBinaryString()
+
+ def pprint(self, nIndent=0):
+ indstr = indent*nIndent
+ print ''
+ print indstr + 'Message(block):'
+ self.header.pprint(nIndent+1)
+ for tx in self.txList:
+ print indstr + indent + 'Tx:', tx.getHashHex()
+
+
################################################################################
class PayloadAlert(object):
   # STUB: the attributes mirror the protocol 'alert' message fields, but
   # unserialize() accepts and IGNORES the incoming bytes, and serialize()
   # always returns the empty string
   command = 'alert'

   def __init__(self):
      self.version = 1
      self.relayUntil = 0
      self.expiration = 0
      self.uniqueID = 0
      self.cancelVal = 0
      self.cancelSet = []
      self.minVersion = 0
      self.maxVersion = 0
      self.subVerSet = []
      self.comment = ''
      self.statusBar = ''
      self.reserved = ''
      self.signature = ''


   def unserialize(self, toUnpack):
      # Payload bytes are wrapped but never parsed (see STUB note above)
      if isinstance(toUnpack, BinaryUnpacker):
         blkData = toUnpack
      else:
         blkData = BinaryUnpacker( toUnpack )

      return self

   def serialize(self):
      # Always empty (see STUB note above)
      bp = BinaryPacker()
      return bp.getBinaryString()


   def pprint(self, nIndent=0):
      print nIndent*'\t' + 'ALERT(...)'
+
################################################################################
# Use this map to figure out which object to serialize/unserialize from a cmd
# (keys match the NUL-stripped 12-byte command field of the message header)
PayloadMap = {
   'ping': PayloadPing,
   'tx': PayloadTx,
   'inv': PayloadInv,
   'version': PayloadVersion,
   'verack': PayloadVerack,
   'addr': PayloadAddr,
   'getdata': PayloadGetData,
   'getheaders': PayloadGetHeaders,
   'getblocks': PayloadGetBlocks,
   'block': PayloadBlock,
   'headers': PayloadHeaders,
   'alert': PayloadAlert }
+
+
class FakeClientFactory(ReconnectingClientFactory):
   """
   A fake class that has the same methods as an ArmoryClientFactory,
   but doesn't do anything. If there is no internet, then we want
   to be able to use the same calls
   """
   #############################################################################
   # NOTE(review): ArmoryClientFactory.__init__ takes a leading 'bdm'
   # argument that this stub does not -- confirm callers construct the
   # fake without one
   def __init__(self, \
                def_handshake=None, \
                func_loseConnect=(lambda: None), \
                func_madeConnect=(lambda: None), \
                func_newTx=(lambda x: None), \
                func_newBlock=(lambda x,y: None), \
                func_inv=(lambda x: None)): pass
   def addTxToMemoryPool(self, pytx): pass
   def handshakeFinished(self, protoObj): pass
   def clientConnectionLost(self, connector, reason): pass
   def connectionFailed(self, protoObj, reason): pass
   def sendTx(self, pytxObj): pass
   # NOTE(review): no sendMessage() stub, though the real factory has one;
   # offline code paths calling sendMessage would hit the twisted base class
+
+################################################################################
+# It seems we need to do this frequently when downloading headers & blocks
+# This only returns a list of numbers, but one list-comprehension to get hashes
def createBlockLocatorNumList(topblk):
   """
   Build the standard block-locator height list: the ~10 most recent block
   heights one apart, then exponentially-spaced heights, always ending
   with genesis (height 0).
   """
   heights = []
   current, stride, iteration = topblk, 1, 0
   while current > 0:
      heights.append(current)
      # After ten entries, double the gap each step
      if iteration >= 10:
         stride *= 2
      current -= stride
      iteration += 1
   heights.append(0)
   return heights
+
+
################################################################################
def forceDeferred(callbk):
   """
   Normalize a callback into a twisted Deferred.  Deferreds pass through
   unchanged; a plain callable is wrapped; a falsy argument yields None.
   """
   if not callbk:
      return None
   if isinstance(callbk, Deferred):
      return callbk
   wrapped = Deferred()
   wrapped.addCallback(callbk)
   return wrapped
+
+# kate: indent-width 3; replace-tabs on;
diff --git a/armoryengine/PyBtcAddress.py b/armoryengine/PyBtcAddress.py
new file mode 100644
index 000000000..f8bec854f
--- /dev/null
+++ b/armoryengine/PyBtcAddress.py
@@ -0,0 +1,1339 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+from CppBlockUtils import SecureBinaryData, CryptoAES, CryptoECDSA
+from armoryengine.ArmoryUtils import ADDRBYTE, hash256, binary_to_base58, \
+ KeyDataError, RightNow, LOGERROR, ChecksumError, convertKeyDataToAddress, \
+ verifyChecksum, WalletLockError, createSigScriptFromRS, binary_to_int, computeChecksum, \
+ getVersionInt, PYBTCWALLET_VERSION, bitset_to_int, LOGDEBUG, Hash160ToScrAddr, \
+ int_to_bitset, UnserializeError, hash160_to_addrStr, int_to_binary, BIGENDIAN, \
+ BadAddressError, checkAddrStrValid, binary_to_hex
+from armoryengine.BinaryPacker import BinaryPacker, UINT8, UINT16, UINT32, UINT64, \
+ INT8, INT16, INT32, INT64, VAR_INT, VAR_STR, FLOAT, BINARY_CHUNK
+from armoryengine.BinaryUnpacker import BinaryUnpacker
+from armoryengine.Timer import TimeThisFunction
+import CppBlockUtils as Cpp
+
+
+#############################################################################
+def calcWalletIDFromRoot(root, chain):
+ """ Helper method for computing a wallet ID """
+ root = PyBtcAddress().createFromPlainKeyData(SecureBinaryData(root))
+ root.chaincode = SecureBinaryData(chain)
+ first = root.extendAddressChain()
+ return binary_to_base58((ADDRBYTE + first.getAddr160()[:5])[::-1])
+
+class PyBtcAddress(object):
+ """
+ PyBtcAddress --
+
+ This class encapsulated EVERY kind of address object:
+ -- Plaintext private-key-bearing addresses
+ -- Encrypted private key addresses, with AES locking and unlocking
+ -- Watching-only public-key addresses
+ -- Address-only storage, representing someone else's key
+ -- Deterministic address generation from previous addresses
+ -- Serialization and unserialization of key data under all conditions
+ -- Checksums on all serialized fields to protect against HDD byte errors
+
+ For deterministic wallets, new addresses will be created from a chaincode
+ and the previous address. What is implemented here is a special kind of
+ deterministic calculation that actually allows the user to securely
+ generate new addresses even if they don't have the private key. This
+ method uses Diffie-Hellman shared-secret calculations to produce the new
+ keys, and has the same level of security as all other ECDSA operations.
+      There are a lot of fantastic benefits to doing this:
+
+ (1) If all addresses in wallet are chained, then you only need to backup
+ your wallet ONCE -- when you first create it. Print it out, put it
+ in a safety-deposit box, or tattoo the generator key to the inside
+ of your eyelid: it will never change.
+
+ (2) You can keep your private keys on an offline machine, and keep a
+ watching-only wallet online. You will be able to generate new
+ keys/addresses, and verify incoming transactions, without ever
+ requiring your private key to touch the internet.
+
+ (3) If your friend has the chaincode and your first public key, they
+ too can generate new addresses for you -- allowing them to send
+ you money multiple times, with different addresses, without ever
+ needing to specifically request the addresses.
+ (the downside to this is if the chaincode is compromised, all
+ chained addresses become de-anonymized -- but is only a loss of
+ privacy, not security)
+
+ However, we do require some fairly complicated logic, due to the fact
+ that a user with a full, private-key-bearing wallet, may try to generate
+ a new key/address without supplying a passphrase. If this happens, the
+ wallet logic gets very complicated -- we don't want to reject the request
+ to generate a new address, but we can't compute the private key until the
+ next time the user unlocks their wallet. Thus, we have to save off the
+ data they will need to create the key, to be applied on next unlock.
+ """
+
+ #############################################################################
+ def __init__(self):
+ """
+ We use SecureBinaryData objects to store pub, priv and IV objects,
+ because that is what is required by the C++ code. See EncryptionUtils.h
+      to see the available methods.
+ """
+ self.addrStr20 = ''
+ self.binPublicKey65 = SecureBinaryData() # 0x04 X(BE) Y(BE)
+ self.binPrivKey32_Encr = SecureBinaryData() # BIG-ENDIAN
+ self.binPrivKey32_Plain = SecureBinaryData()
+ self.binInitVect16 = SecureBinaryData()
+ self.isLocked = False
+ self.useEncryption = False
+ self.isInitialized = False
+ self.keyChanged = False # ...since last key encryption
+ self.walletByteLoc = -1
+ self.chaincode = SecureBinaryData()
+ self.chainIndex = 0
+
+ # Information to be used by C++ to know where to search for transactions
+ # in the blockchain (disabled in favor of a better search method)
+ self.timeRange = [2**32-1, 0]
+ self.blkRange = [2**32-1, 0]
+
+ # This feels like a hack, but it's the only way I can think to handle
+ # the case of generating new, chained addresses, even without the
+ # private key currently in memory. i.e. - If we can't unlock the priv
+ # key when creating a new chained priv key, we will simply extend the
+ # public key, and store the last-known chain info, so that it can be
+ # generated the next time the address is unlocked
+ self.createPrivKeyNextUnlock = False
+ self.createPrivKeyNextUnlock_IVandKey = [None, None] # (IV,Key)
+ self.createPrivKeyNextUnlock_ChainDepth = -1
+
+ #############################################################################
+ def isInitialized(self):
+ """ Keep track of whether this address has been initialized """
+ return self.isInitialized
+
+ #############################################################################
+ def hasPrivKey(self):
+ """
+ We have a private key if either the plaintext, or ciphertext private-key
+ fields are non-empty. We also consider ourselves to "have" the private
+ key if this address was chained from a key that has the private key, even
+ if we haven't computed it yet (due to not having unlocked the private key
+ before creating the new address).
+ """
+ return (self.binPrivKey32_Encr.getSize() != 0 or \
+ self.binPrivKey32_Plain.getSize() != 0 or \
+ self.createPrivKeyNextUnlock)
+
+ #############################################################################
+ def hasPubKey(self):
+ return (self.binPublicKey65.getSize() != 0)
+
+ #############################################################################
+ def getAddrStr(self, netbyte=ADDRBYTE):
+ chksum = hash256(netbyte + self.addrStr20)[:4]
+ return binary_to_base58(netbyte + self.addrStr20 + chksum)
+
+ #############################################################################
+ def getAddr160(self):
+ if len(self.addrStr20)!=20:
+ raise KeyDataError, 'PyBtcAddress does not have an address string!'
+ return self.addrStr20
+
+
+ #############################################################################
+ def isCompressed(self):
+ # Armory wallets (v1.35) do not support compressed keys
+ return False
+
+
+ #############################################################################
+ def touch(self, unixTime=None, blkNum=None):
+ """
+ Just like "touching" a file, this makes sure that the firstSeen and
+ lastSeen fields for this address are updated to include "now"
+
+ If we include only a block number, we will fill in the timestamp with
+      the unix-time for that block (if the BlockDataManager is available)
+ """
+ if self.blkRange[0]==0:
+ self.blkRange[0]=2**32-1
+ if self.timeRange[0]==0:
+ self.timeRange[0]=2**32-1
+
+ if blkNum==None:
+ if TheBDM.getBDMState()=='BlockchainReady':
+ topBlk = TheBDM.getTopBlockHeight()
+ self.blkRange[0] = long(min(self.blkRange[0], topBlk))
+ self.blkRange[1] = long(max(self.blkRange[1], topBlk))
+ else:
+ self.blkRange[0] = long(min(self.blkRange[0], blkNum))
+ self.blkRange[1] = long(max(self.blkRange[1], blkNum))
+
+ if unixTime==None and TheBDM.getBDMState()=='BlockchainReady':
+ unixTime = TheBDM.getHeaderByHeight(blkNum).getTimestamp()
+
+ if unixTime==None:
+ unixTime = RightNow()
+
+ self.timeRange[0] = long(min(self.timeRange[0], unixTime))
+ self.timeRange[1] = long(max(self.timeRange[1], unixTime))
+
+
+
+ #############################################################################
+ def copy(self):
+ newAddr = PyBtcAddress().unserialize(self.serialize())
+ newAddr.binPrivKey32_Plain = self.binPrivKey32_Plain.copy()
+ newAddr.binPrivKey32_Encr = self.binPrivKey32_Encr.copy()
+ newAddr.binPublicKey65 = self.binPublicKey65.copy()
+ newAddr.binInitVect16 = self.binInitVect16.copy()
+ newAddr.isLocked = self.isLocked
+ newAddr.useEncryption = self.useEncryption
+ newAddr.isInitialized = self.isInitialized
+ newAddr.keyChanged = self.keyChanged
+ newAddr.walletByteLoc = self.walletByteLoc
+ newAddr.chaincode = self.chaincode
+ newAddr.chainIndex = self.chainIndex
+ return newAddr
+
+
+
+ #############################################################################
+ def getTimeRange(self):
+ return self.timeRange
+
+ #############################################################################
+ def getBlockRange(self):
+ return self.blkRange
+
+ #############################################################################
+ def serializePublicKey(self):
+ """Converts the SecureBinaryData public key to a 65-byte python string"""
+ return self.binPublicKey65.toBinStr()
+
+ #############################################################################
+ def serializeEncryptedPrivateKey(self):
+ """Converts SecureBinaryData encrypted private key to python string"""
+ return self.binPrivKey32_Encr.toBinStr()
+
+ #############################################################################
+ # NOTE: This method should rarely be used, unless we are only printing it
+ # to the screen. Actually, it will be used for unencrypted wallets
+ def serializePlainPrivateKey(self):
+ return self.binPrivKey32_Plain.toBinStr()
+
+ def serializeInitVector(self):
+ return self.binInitVect16.toBinStr()
+
+
+ #############################################################################
+ def verifyEncryptionKey(self, secureKdfOutput):
+ """
+ Determine if this data is the decryption key for this encrypted address
+ """
+ if not self.useEncryption or not self.hasPrivKey():
+ return False
+
+ if self.useEncryption and not secureKdfOutput:
+ LOGERROR('No encryption key supplied to verifyEncryption!')
+ return False
+
+
+ decryptedKey = CryptoAES().DecryptCFB( self.binPrivKey32_Encr, \
+ SecureBinaryData(secureKdfOutput), \
+ self.binInitVect16)
+ verified = False
+
+ if not self.isLocked:
+ if decryptedKey==self.binPrivKey32_Plain:
+ verified = True
+ else:
+ computedPubKey = CryptoECDSA().ComputePublicKey(decryptedKey)
+ if self.hasPubKey():
+ verified = (self.binPublicKey65==computedPubKey)
+ else:
+ verified = (computedPubKey.getHash160()==self.addrStr20)
+ if verified:
+ self.binPublicKey65 = computedPubKey
+
+ decryptedKey.destroy()
+ return verified
+
+
+
+ #############################################################################
+ def setInitializationVector(self, IV16=None, random=False, force=False):
+ """
+ Either set the IV through input arg, or explicitly call random=True
+ Returns the IV -- which is especially important if it is randomly gen
+
+ This method is mainly for PREVENTING you from changing an existing IV
+ without meaning to. Losing the IV for encrypted data is almost as bad
+ as losing the encryption key. Caller must use force=True in order to
+ override this warning -- otherwise this method will abort.
+ """
+ if self.binInitVect16.getSize()==16:
+ if self.isLocked:
+ LOGERROR('Address already locked with different IV.')
+ LOGERROR('Changing IV may cause loss of keydata.')
+ else:
+ LOGERROR('Address already contains an initialization')
+ LOGERROR('vector. If you change IV without updating')
+ LOGERROR('the encrypted storage, you may permanently')
+ LOGERROR('lose the encrypted data')
+
+ if not force:
+ LOGERROR('If you really want to do this, re-execute this call with force=True')
+ return ''
+
+ if IV16:
+ self.binInitVect16 = SecureBinaryData(IV16)
+ elif random==True:
+ self.binInitVect16 = SecureBinaryData().GenerateRandom(16)
+ else:
+ raise KeyDataError, 'setInitVector: set IV data, or random=True'
+ return self.binInitVect16
+
+ #############################################################################
+ def enableKeyEncryption(self, IV16=None, generateIVIfNecessary=False):
+ """
+      setIV method will raise an error if we don't specify any args, but it is
+ acceptable HERE to not specify any args just to enable encryption
+ """
+ self.useEncryption = True
+ if IV16:
+ self.setInitializationVector(IV16)
+ elif generateIVIfNecessary and self.binInitVect16.getSize()<16:
+ self.setInitializationVector(random=True)
+
+
+ #############################################################################
+ def isKeyEncryptionEnabled(self):
+ return self.useEncryption
+
+ #############################################################################
+ def createFromEncryptedKeyData(self, addr20, encrPrivKey32, IV16, \
+ chkSum=None, pubKey=None):
+ # We expect both private key and IV to the right size
+ assert(encrPrivKey32.getSize()==32)
+ assert(IV16.getSize()==16)
+ self.__init__()
+ self.addrStr20 = addr20
+ self.binPrivKey32_Encr = SecureBinaryData(encrPrivKey32)
+ self.setInitializationVector(IV16)
+ self.isLocked = True
+ self.useEncryption = True
+ self.isInitialized = True
+ if chkSum and not self.binPrivKey32_Encr.getHash256().startswith(chkSum):
+ raise ChecksumError, "Checksum doesn't match encrypted priv key data!"
+ if pubKey:
+ self.binPublicKey65 = SecureBinaryData(pubKey)
+ if not self.binPublicKey65.getHash160()==self.addrStr20:
+ raise KeyDataError, "Public key does not match supplied address"
+
+ return self
+
+
+ #############################################################################
+ def createFromPlainKeyData(self, plainPrivKey, addr160=None, willBeEncr=False, \
+ generateIVIfNecessary=False, IV16=None, \
+ chksum=None, publicKey65=None, \
+ skipCheck=False, skipPubCompute=False):
+
+ assert(plainPrivKey.getSize()==32)
+
+ if not addr160:
+ addr160 = convertKeyDataToAddress(privKey=plainPrivKey)
+
+ self.__init__()
+ self.addrStr20 = addr160
+ self.isInitialized = True
+ self.binPrivKey32_Plain = SecureBinaryData(plainPrivKey)
+ self.isLocked = False
+
+ if willBeEncr:
+ self.enableKeyEncryption(IV16, generateIVIfNecessary)
+ elif IV16:
+ self.binInitVect16 = IV16
+
+ if chksum and not verifyChecksum(self.binPrivKey32_Plain.toBinStr(), chksum):
+ raise ChecksumError, "Checksum doesn't match plaintext priv key!"
+ if publicKey65:
+ self.binPublicKey65 = SecureBinaryData(publicKey65)
+ if not self.binPublicKey65.getHash160()==self.addrStr20:
+ raise KeyDataError, "Public key does not match supplied address"
+ if not skipCheck:
+ if not CryptoECDSA().CheckPubPrivKeyMatch(self.binPrivKey32_Plain,\
+ self.binPublicKey65):
+ raise KeyDataError, 'Supplied pub and priv key do not match!'
+ elif not skipPubCompute:
+ # No public key supplied, but we do want to calculate it
+ self.binPublicKey65 = CryptoECDSA().ComputePublicKey(plainPrivKey)
+
+ return self
+
+
+ #############################################################################
+ def createFromPublicKeyData(self, publicKey65, chksum=None):
+
+ assert(publicKey65.getSize()==65)
+ self.__init__()
+ self.addrStr20 = publicKey65.getHash160()
+ self.binPublicKey65 = publicKey65
+ self.isInitialized = True
+ self.isLocked = False
+ self.useEncryption = False
+
+ if chksum and not verifyChecksum(self.binPublicKey65.toBinStr(), chksum):
+ raise ChecksumError, "Checksum doesn't match supplied public key!"
+
+ return self
+
+
+
+ #############################################################################
+ def safeExtendPrivateKey(self, privKey, chn, pubKey=None):
+ # We do this computation twice, in case one is somehow corrupted
+ # (Must be ultra paranoid with computing keys)
+ logMult1 = SecureBinaryData()
+ logMult2 = SecureBinaryData()
+ a160hex = ''
+
+ # Can provide a pre-computed public key to skip that part of the compute
+ if pubKey is None:
+ pubKey = SecureBinaryData(0)
+ else:
+ a160hex = binary_to_hex(pubKey.getHash160())
+
+ newPriv1 = CryptoECDSA().ComputeChainedPrivateKey(privKey, chn, pubKey, logMult1)
+ newPriv2 = CryptoECDSA().ComputeChainedPrivateKey(privKey, chn, pubKey, logMult2)
+
+ if newPriv1==newPriv2:
+ newPriv2.destroy()
+ with open(MULT_LOG_FILE,'a') as f:
+ f.write('PrvChain (pkh, mult): %s,%s\n' % (a160hex,logMult1.toHexStr()))
+ return newPriv1
+
+ else:
+ LOGCRIT('Chaining failed! Computed keys are different!')
+ LOGCRIT('Recomputing chained key 3 times; bail if they do not match')
+ newPriv1.destroy()
+ newPriv2.destroy()
+ logMult3 = SecureBinaryData()
+ newPriv1 = CryptoECDSA().ComputeChainedPrivateKey(privKey, chn, pubKey, logMult1)
+ newPriv2 = CryptoECDSA().ComputeChainedPrivateKey(privKey, chn, pubKey, logMult2)
+ newPriv3 = CryptoECDSA().ComputeChainedPrivateKey(privKey, chn, pubKey, logMult3)
+ LOGCRIT(' Multiplier1: ' + logMult1.toHexStr())
+ LOGCRIT(' Multiplier2: ' + logMult2.toHexStr())
+ LOGCRIT(' Multiplier3: ' + logMult3.toHexStr())
+
+ if newPriv1==newPriv2 and newPriv1==newPriv3:
+ newPriv2.destroy()
+ newPriv3.destroy()
+ with open(MULT_LOG_FILE,'a') as f:
+ f.write('PrvChain (pkh, mult): %s,%s\n' % (a160hex,logMult1.toHexStr()))
+ return newPriv1
+ else:
+ LOGCRIT('Chaining failed again! Returning empty private key.')
+ newPriv1.destroy()
+ newPriv2.destroy()
+ newPriv3.destroy()
+ # This should crash just about any process that would try to use it
+ # without checking for empty private key.
+ return SecureBinaryData(0)
+
+
+ #############################################################################
+ def safeExtendPublicKey(self, pubKey, chn):
+ # We do this computation twice, in case one is somehow corrupted
+ # (Must be ultra paranoid with computing keys)
+ a160hex = binary_to_hex(pubKey.getHash160())
+ logMult1 = SecureBinaryData()
+ logMult2 = SecureBinaryData()
+ newPub1 = CryptoECDSA().ComputeChainedPublicKey(pubKey, chn, logMult1)
+ newPub2 = CryptoECDSA().ComputeChainedPublicKey(pubKey, chn, logMult2)
+
+ if newPub1==newPub2:
+ newPub2.destroy()
+ with open(MULT_LOG_FILE,'a') as f:
+ f.write('PubChain (pkh, mult): %s,%s\n' % (a160hex, logMult1.toHexStr()))
+ return newPub1
+ else:
+ LOGCRIT('Chaining failed! Computed keys are different!')
+ LOGCRIT('Recomputing chained key 3 times; bail if they do not match')
+ newPub1.destroy()
+ newPub2.destroy()
+ logMult3 = SecureBinaryData()
+ newPub1 = CryptoECDSA().ComputeChainedPublicKey(pubKey, chn, logMult1)
+ newPub2 = CryptoECDSA().ComputeChainedPublicKey(pubKey, chn, logMult2)
+ newPub3 = CryptoECDSA().ComputeChainedPublicKey(pubKey, chn, logMult3)
+ LOGCRIT(' Multiplier1: ' + logMult1.toHexStr())
+ LOGCRIT(' Multiplier2: ' + logMult2.toHexStr())
+ LOGCRIT(' Multiplier3: ' + logMult3.toHexStr())
+
+ if newPub1==newPub2 and newPub1==newPub3:
+ newPub2.destroy()
+ newPub3.destroy()
+ with open(MULT_LOG_FILE,'a') as f:
+ f.write('PubChain (pkh, mult): %s,%s\n' % (a160hex, logMult1.toHexStr()))
+ return newPub1
+ else:
+ LOGCRIT('Chaining failed again! Returning empty public key.')
+ newPub1.destroy()
+ newPub2.destroy()
+ newPub3.destroy()
+ # This should crash just about any process that would try to use it
+ # without checking for empty public key.
+ return SecureBinaryData(0)
+
+ #############################################################################
+ def lock(self, secureKdfOutput=None, generateIVIfNecessary=False):
+ # We don't want to destroy the private key if it's not supposed to be
+ # encrypted. Similarly, if we haven't actually saved the encrypted
+ # version, let's not lock it
+ newIV = False
+ if not self.useEncryption or not self.hasPrivKey():
+ # This isn't supposed to be encrypted, or there's no privkey to encrypt
+ return
+ else:
+ if self.binPrivKey32_Encr.getSize()==32 and not self.keyChanged:
+ # Addr should be encrypted, and we already have encrypted priv key
+ self.binPrivKey32_Plain.destroy()
+ self.isLocked = True
+ elif self.binPrivKey32_Plain.getSize()==32:
+ # Addr should be encrypted, but haven't computed encrypted value yet
+ if secureKdfOutput!=None:
+ # We have an encryption key, use it
+ if self.binInitVect16.getSize() < 16:
+ if not generateIVIfNecessary:
+ raise KeyDataError, 'No Initialization Vector available'
+ else:
+ self.binInitVect16 = SecureBinaryData().GenerateRandom(16)
+ newIV = True
+
+ # Finally execute the encryption
+ self.binPrivKey32_Encr = CryptoAES().EncryptCFB( \
+ self.binPrivKey32_Plain, \
+ SecureBinaryData(secureKdfOutput), \
+ self.binInitVect16)
+ # Destroy the unencrypted key, reset the keyChanged flag
+ self.binPrivKey32_Plain.destroy()
+ self.isLocked = True
+ self.keyChanged = False
+ else:
+ # Can't encrypt the addr because we don't have encryption key
+ raise WalletLockError, ("\n\tTrying to destroy plaintext key, but no"
+ "\n\tencrypted key data is available, and no"
+ "\n\tencryption key provided to encrypt it.")
+
+
+ # In case we changed the IV, we should let the caller know this
+ return self.binInitVect16 if newIV else SecureBinaryData()
+
+
+ #############################################################################
+ def unlock(self, secureKdfOutput, skipCheck=False):
+ """
+ This method knows nothing about a key-derivation function. It simply
+ takes in an AES key and applies it to decrypt the data. However, it's
+ best if that AES key is actually derived from "heavy" key-derivation
+ function.
+ """
+ if not self.useEncryption or not self.isLocked:
+ # Bail out if the wallet is unencrypted, or already unlocked
+ self.isLocked = False
+ return
+
+
+ if self.createPrivKeyNextUnlock:
+ # This is SPECIFICALLY for the case that we didn't have the encr key
+ # available when we tried to extend our deterministic wallet, and
+ # generated a new address anyway
+ self.binPrivKey32_Plain = CryptoAES().DecryptCFB( \
+ self.createPrivKeyNextUnlock_IVandKey[1], \
+ SecureBinaryData(secureKdfOutput), \
+ self.createPrivKeyNextUnlock_IVandKey[0])
+
+ for i in range(self.createPrivKeyNextUnlock_ChainDepth):
+ #self.binPrivKey32_Plain = CryptoECDSA().ComputeChainedPrivateKey( \
+ #self.binPrivKey32_Plain, \
+ #self.chaincode)
+
+ self.binPrivKey32_Plain = self.safeExtendPrivateKey( \
+ self.binPrivKey32_Plain, \
+ self.chaincode)
+
+
+ # IV should have already been randomly generated, before
+ self.isLocked = False
+ self.createPrivKeyNextUnlock = False
+ self.createPrivKeyNextUnlock_IVandKey = []
+ self.createPrivKeyNextUnlock_ChainDepth = 0
+
+ # Lock/Unlock to make sure encrypted private key is filled
+ self.lock(secureKdfOutput,generateIVIfNecessary=True)
+ self.unlock(secureKdfOutput)
+
+ else:
+
+ if not self.binPrivKey32_Encr.getSize()==32:
+ raise WalletLockError, 'No encrypted private key to decrypt!'
+
+ if not self.binInitVect16.getSize()==16:
+ raise WalletLockError, 'Initialization Vect (IV) is missing!'
+
+ self.binPrivKey32_Plain = CryptoAES().DecryptCFB( \
+ self.binPrivKey32_Encr, \
+ secureKdfOutput, \
+ self.binInitVect16)
+
+ self.isLocked = False
+
+ if not skipCheck:
+ if not self.hasPubKey():
+ self.binPublicKey65 = CryptoECDSA().ComputePublicKey(\
+ self.binPrivKey32_Plain)
+ else:
+ # We should usually check that keys match, but may choose to skip
+ # if we have a lot of keys to load
+ # NOTE: I run into this error if I fill the keypool without first
+ # unlocking the wallet. I'm not sure why it doesn't work
+ # when locked (it should), but this wallet format has been
+ # working flawless for almost a year... and will be replaced
+ # soon, so I won't sweat it.
+ if not CryptoECDSA().CheckPubPrivKeyMatch(self.binPrivKey32_Plain, \
+ self.binPublicKey65):
+ raise KeyDataError, "Stored public key does not match priv key!"
+
+
+
+ #############################################################################
+ def changeEncryptionKey(self, secureOldKey, secureNewKey):
+ """
+ We will use None to specify "no encryption", either for old or new. Of
+ course we throw an error is old key is "None" but the address is actually
+ encrypted.
+ """
+ if not self.hasPrivKey():
+ raise KeyDataError, 'No private key available to re-encrypt'
+
+ if not secureOldKey and self.useEncryption and self.isLocked:
+ raise WalletLockError, 'Need old encryption key to unlock private keys'
+
+ wasLocked = self.isLocked
+
+ # Decrypt the original key
+ if self.isLocked:
+ self.unlock(secureOldKey, skipCheck=False)
+
+ # Keep the old IV if we are changing the key. IV reuse is perfectly
+ # fine for a new key, and might save us from disaster if we otherwise
+ # generated a new one and then forgot to take note of it.
+ self.keyChanged = True
+ if not secureNewKey:
+ # If we chose not to re-encrypt, make sure we clear the encryption
+ self.binInitVect16 = SecureBinaryData()
+ self.binPrivKey32_Encr = SecureBinaryData()
+ self.isLocked = False
+ self.useEncryption = False
+ else:
+ # Re-encrypt with new key (using same IV)
+ self.useEncryption = True
+ self.lock(secureNewKey) # do this to make sure privKey_Encr filled
+ if wasLocked:
+ self.isLocked = True
+ else:
+ self.unlock(secureNewKey)
+ self.isLocked = False
+
+
+
+
+ #############################################################################
+ # This is more of a static method
+ def checkPubPrivKeyMatch(self, securePriv, securePub):
+ CryptoECDSA().CheckPubPrivKeyMatch(securePriv, securePub)
+
+
+
+ #############################################################################
+ @TimeThisFunction
+ def generateDERSignature(self, binMsg, secureKdfOutput=None):
+ """
+ This generates a DER signature for this address using the private key.
+ Obviously, if we don't have the private key, we throw an error. Or if
+ the wallet is locked and no encryption key was provided.
+
+ If an encryption key IS provided, then we unlock the address just long
+ enough to sign the message and then re-lock it
+ """
+
+ if not self.hasPrivKey():
+ raise KeyDataError, 'Cannot sign for address without private key!'
+
+ if self.isLocked:
+ if secureKdfOutput==None:
+ raise WalletLockError, "Cannot sign Tx when private key is locked!"
+ else:
+ # Wallet is locked but we have a decryption key
+ self.unlock(secureKdfOutput, skipCheck=False)
+
+ try:
+ secureMsg = SecureBinaryData(binMsg)
+ sig = CryptoECDSA().SignData(secureMsg, self.binPrivKey32_Plain)
+ sigstr = sig.toBinStr()
+
+ rBin = sigstr[:32 ]
+ sBin = sigstr[ 32:]
+ return createSigScriptFromRS(rBin, sBin)
+
+ except:
+ LOGERROR('Failed signature generation')
+ finally:
+ # Always re-lock/cleanup after unlocking, even after an exception.
+ # If locking triggers an error too, we will just skip it.
+ try:
+ if secureKdfOutput!=None:
+ self.lock(secureKdfOutput)
+ except:
+ LOGERROR('Error re-locking address')
+ pass
+
+
+
+
+ #############################################################################
+ @TimeThisFunction
+ def verifyDERSignature(self, binMsgVerify, derSig):
+ if not self.hasPubKey():
+ raise KeyDataError, 'No public key available for this address!'
+
+ if not isinstance(derSig, str):
+ # In case this is a SecureBinaryData object...
+ derSig = derSig.toBinStr()
+
+ codeByte = derSig[0]
+ nBytes = binary_to_int(derSig[1])
+ rsStr = derSig[2:2+nBytes]
+ assert(codeByte == '\x30')
+ assert(nBytes == len(rsStr))
+ # Read r
+ codeByte = rsStr[0]
+ rBytes = binary_to_int(rsStr[1])
+ r = rsStr[2:2+rBytes]
+ assert(codeByte == '\x02')
+ sStr = rsStr[2+rBytes:]
+ # Read s
+ codeByte = sStr[0]
+ sBytes = binary_to_int(sStr[1])
+ s = sStr[2:2+sBytes]
+ assert(codeByte == '\x02')
+ # Now we have the (r,s) values of the
+
+ secMsg = SecureBinaryData(binMsgVerify)
+ secSig = SecureBinaryData(r[-32:] + s[-32:])
+ secPubKey = SecureBinaryData(self.binPublicKey65)
+ return CryptoECDSA().VerifyData(secMsg, secSig, secPubKey)
+
+ #############################################################################
+ def markAsRootAddr(self, chaincode):
+ if not chaincode.getSize()==32:
+ raise KeyDataError, 'Chaincode must be 32 bytes'
+ else:
+ self.chainIndex = -1
+ self.chaincode = chaincode
+
+
+ #############################################################################
+ def isAddrChainRoot(self):
+ return (self.chainIndex==-1)
+
+ #############################################################################
+ @TimeThisFunction
+ def extendAddressChain(self, secureKdfOutput=None, newIV=None):
+ """
+ We require some fairly complicated logic here, due to the fact that a
+ user with a full, private-key-bearing wallet, may try to generate a new
+ key/address without supplying a passphrase. If this happens, the wallet
+ logic gets mucked up -- we don't want to reject the request to
+ generate a new address, but we can't compute the private key until the
+ next time the user unlocks their wallet. Thus, we have to save off the
+ data they will need to create the key, to be applied on next unlock.
+ """
+ if not self.chaincode.getSize() == 32:
+ raise KeyDataError, 'No chaincode has been defined to extend chain'
+
+ newAddr = PyBtcAddress()
+ privKeyAvailButNotDecryptable = (self.hasPrivKey() and \
+ self.isLocked and \
+ not secureKdfOutput )
+
+
+ if self.hasPrivKey() and not privKeyAvailButNotDecryptable:
+ # We are extending a chain using private key data
+ wasLocked = self.isLocked
+ if self.useEncryption and self.isLocked:
+ if not secureKdfOutput:
+ raise WalletLockError, 'Cannot create new address without passphrase'
+ self.unlock(secureKdfOutput)
+ if not newIV:
+ newIV = SecureBinaryData().GenerateRandom(16)
+
+ if self.hasPubKey():
+ #newPriv = CryptoECDSA().ComputeChainedPrivateKey( \
+ #self.binPrivKey32_Plain, \
+ #self.chaincode, \
+ #self.binPublicKey65)
+ newPriv = self.safeExtendPrivateKey( \
+ self.binPrivKey32_Plain, \
+ self.chaincode, \
+ self.binPublicKey65)
+ else:
+ #newPriv = CryptoECDSA().ComputeChainedPrivateKey( \
+ #self.binPrivKey32_Plain, \
+ #self.chaincode)
+ newPriv = self.safeExtendPrivateKey( \
+ self.binPrivKey32_Plain, \
+ self.chaincode)
+
+ newPub = CryptoECDSA().ComputePublicKey(newPriv)
+ newAddr160 = newPub.getHash160()
+ newAddr.createFromPlainKeyData(newPriv, newAddr160, \
+ IV16=newIV, publicKey65=newPub)
+
+ newAddr.addrStr20 = newPub.getHash160()
+ newAddr.useEncryption = self.useEncryption
+ newAddr.isInitialized = True
+ newAddr.chaincode = self.chaincode
+ newAddr.chainIndex = self.chainIndex+1
+
+ # We can't get here without a secureKdfOutput (I think)
+ if newAddr.useEncryption:
+ newAddr.lock(secureKdfOutput)
+ if not wasLocked:
+ newAddr.unlock(secureKdfOutput)
+ self.unlock(secureKdfOutput)
+ return newAddr
+ else:
+ # We are extending the address based solely on its public key
+ if not self.hasPubKey():
+ raise KeyDataError, 'No public key available to extend chain'
+
+ #newAddr.binPublicKey65 = CryptoECDSA().ComputeChainedPublicKey( \
+ #self.binPublicKey65, self.chaincode)
+ newAddr.binPublicKey65 = self.safeExtendPublicKey( \
+ self.binPublicKey65, self.chaincode)
+
+ newAddr.addrStr20 = newAddr.binPublicKey65.getHash160()
+ newAddr.useEncryption = self.useEncryption
+ newAddr.isInitialized = True
+ newAddr.chaincode = self.chaincode
+ newAddr.chainIndex = self.chainIndex+1
+
+
+ if privKeyAvailButNotDecryptable:
+ # *** store what is needed to recover key on next addr unlock ***
+ newAddr.isLocked = True
+ newAddr.useEncryption = True
+ if not newIV:
+ newIV = SecureBinaryData().GenerateRandom(16)
+ newAddr.binInitVect16 = newIV
+ newAddr.createPrivKeyNextUnlock = True
+ newAddr.createPrivKeyNextUnlock_IVandKey = [None,None]
+ if self.createPrivKeyNextUnlock:
+ # We are chaining from address also requiring gen on next unlock
+ newAddr.createPrivKeyNextUnlock_IVandKey[0] = \
+ self.createPrivKeyNextUnlock_IVandKey[0].copy()
+ newAddr.createPrivKeyNextUnlock_IVandKey[1] = \
+ self.createPrivKeyNextUnlock_IVandKey[1].copy()
+ newAddr.createPrivKeyNextUnlock_ChainDepth = \
+ self.createPrivKeyNextUnlock_ChainDepth+1
+ else:
+ # The address from which we are extending has already been generated
+ newAddr.createPrivKeyNextUnlock_IVandKey[0] = self.binInitVect16.copy()
+ newAddr.createPrivKeyNextUnlock_IVandKey[1] = self.binPrivKey32_Encr.copy()
+ newAddr.createPrivKeyNextUnlock_ChainDepth = 1
+ return newAddr
+
+
   def serialize(self):
      """
      We define here a binary serialization scheme that will write out ALL
      information needed to completely reconstruct address data from file.
      This method returns a string, but presumably will be used to write addr
      data to file.  The following format is used.

         Address160  (20 bytes) :  The 20-byte hash of the public key
                                   This must always be the first field
         AddressChk  ( 4 bytes) :  Checksum to make sure no error in addr160
         AddrVersion ( 4 bytes) :  Early version don't specify encrypt params
         Flags       ( 8 bytes) :  Addr-specific info, including encrypt params

         ChainCode   (32 bytes) :  For extending deterministic wallets
         ChainChk    ( 4 bytes) :  Checksum for chaincode
         ChainIndex  ( 8 bytes) :  Index in chain if deterministic addresses
         ChainDepth  ( 8 bytes) :  How deep addr is in chain beyond last
                                   computed private key (if base address was
                                   locked when we tried to extend/chain it)

         InitVect    (16 bytes) :  Initialization vector for encryption
         InitVectChk ( 4 bytes) :  Checksum for IV
         PrivKey     (32 bytes) :  Private key data (may be encrypted)
         PrivKeyChk  ( 4 bytes) :  Checksum for private key data

         PublicKey   (65 bytes) :  Public key for this address
         PubKeyChk   ( 4 bytes) :  Checksum for private key data


         FirstTime   ( 8 bytes) :  The first time addr was seen in blockchain
         LastTime    ( 8 bytes) :  The last time addr was seen in blockchain
         FirstBlock  ( 4 bytes) :  The first block addr was seen in blockchain
         LastBlock   ( 4 bytes) :  The last block addr was seen in blockchain
      """

      serializeWithEncryption = self.useEncryption

      # Edge case: encryption was requested but the key was never locked, so
      # only the plaintext private key exists.  Fall back to plaintext output
      # and warn loudly, rather than writing an empty encrypted-key field.
      if self.useEncryption and \
         self.binPrivKey32_Encr.getSize()==0 and \
         self.binPrivKey32_Plain.getSize()>0:
         LOGERROR('')
         LOGERROR('***WARNING: you have chosen to serialize a key you hope to be')
         LOGERROR('            encrypted, but have not yet chosen a passphrase for')
         LOGERROR('            it.  The only way to serialize this address is with ')
         LOGERROR('            the plaintext keys.  Please lock this address at')
         LOGERROR('            least once in order to enable encrypted output.')
         serializeWithEncryption = False

      # Before starting, let's construct the flags for this address
      nFlagBytes = 8
      flags = [False]*nFlagBytes*8
      flags[0] = self.hasPrivKey()
      flags[1] = self.hasPubKey()
      flags[2] = serializeWithEncryption
      flags[3] = self.createPrivKeyNextUnlock
      flags = ''.join([('1' if f else '0') for f in flags])

      # Helper: accept either a raw string or a SecureBinaryData-like object
      def raw(a):
         if isinstance(a, str):
            return a
         else:
            return a.toBinStr()

      # Helper: 4-byte checksum of the raw form of the field
      def chk(a):
         if isinstance(a, str):
            return computeChecksum(a,4)
         else:
            return computeChecksum(a.toBinStr(),4)

      # Use BinaryPacker "width" fields to guaranteee BINARY_CHUNK width.
      # Sure, if we have malformed data we might cut some of it off instead
      # of writing it to the binary stream.  But at least we'll ALWAYS be
      # able to determine where each field is, and will never corrupt the
      # whole wallet so badly we have to go hex-diving to figure out what
      # happened.
      binOut = BinaryPacker()
      binOut.put(BINARY_CHUNK, self.addrStr20, width=20)
      binOut.put(BINARY_CHUNK, chk(self.addrStr20), width= 4)
      binOut.put(UINT32, getVersionInt(PYBTCWALLET_VERSION))
      binOut.put(UINT64, bitset_to_int(flags))

      # Write out address-chaining parameters (for deterministic wallets)
      binOut.put(BINARY_CHUNK, raw(self.chaincode), width=32)
      binOut.put(BINARY_CHUNK, chk(self.chaincode), width= 4)
      binOut.put(INT64, self.chainIndex)
      binOut.put(INT64, self.createPrivKeyNextUnlock_ChainDepth)

      # Write out whatever is appropriate for private-key data
      # Binary-unpacker will write all 0x00 bytes if empty values are given
      if serializeWithEncryption:
         if self.createPrivKeyNextUnlock:
            # Key not generated yet: store the parent's IV+encrypted key so it
            # can be derived on next unlock (see unserialize/flags[3])
            binOut.put(BINARY_CHUNK, raw(self.createPrivKeyNextUnlock_IVandKey[0]), width=16)
            binOut.put(BINARY_CHUNK, chk(self.createPrivKeyNextUnlock_IVandKey[0]), width= 4)
            binOut.put(BINARY_CHUNK, raw(self.createPrivKeyNextUnlock_IVandKey[1]), width=32)
            binOut.put(BINARY_CHUNK, chk(self.createPrivKeyNextUnlock_IVandKey[1]), width= 4)
         else:
            binOut.put(BINARY_CHUNK, raw(self.binInitVect16), width=16)
            binOut.put(BINARY_CHUNK, chk(self.binInitVect16), width= 4)
            binOut.put(BINARY_CHUNK, raw(self.binPrivKey32_Encr), width=32)
            binOut.put(BINARY_CHUNK, chk(self.binPrivKey32_Encr), width= 4)
      else:
         binOut.put(BINARY_CHUNK, raw(self.binInitVect16), width=16)
         binOut.put(BINARY_CHUNK, chk(self.binInitVect16), width= 4)
         binOut.put(BINARY_CHUNK, raw(self.binPrivKey32_Plain), width=32)
         binOut.put(BINARY_CHUNK, chk(self.binPrivKey32_Plain), width= 4)

      binOut.put(BINARY_CHUNK, raw(self.binPublicKey65), width=65)
      binOut.put(BINARY_CHUNK, chk(self.binPublicKey65), width= 4)

      binOut.put(UINT64, self.timeRange[0])
      binOut.put(UINT64, self.timeRange[1])
      binOut.put(UINT32, self.blkRange[0])
      binOut.put(UINT32, self.blkRange[1])

      return binOut.getBinaryString()
+
+ #############################################################################
+ def scanBlockchainForAddress(self, abortIfBDMBusy=False):
+ """
+ This method will return null output if the BDM is currently in the
+ middle of a scan. You can use waitAsLongAsNecessary=True if you
+ want to wait for the previous scan AND the next scan. Otherwise,
+ you can check for bal==-1 and then try again later...
+
+ This is particularly relevant if you know that an address has already
+ been scanned, and you expect this method to return immediately. Thus,
+ you don't want to wait for any scan at all...
+
+ This one-stop-shop method has to be blocking. You might want to
+ register the address and rescan asynchronously, skipping this method
+ entirely:
+
+ cppWlt = Cpp.BtcWallet()
+ cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
+ TheBDM.registerScrAddr(Hash160ToScrAddr(self.getAddr160()))
+ TheBDM.rescanBlockchain(wait=False)
+
+ <... do some other stuff ...>
+
+ if TheBDM.getBDMState()=='BlockchainReady':
+ TheBDM.updateWalletsAfterScan(wait=True) # fast after a rescan
+ bal = cppWlt.getBalance('Spendable')
+ utxoList = cppWlt.getUnspentTxOutList()
+ else:
+ <...come back later...>
+
+ """
+ if TheBDM.getBDMState()=='BlockchainReady' or \
+ (TheBDM.isScanning() and not abortIfBDMBusy):
+ LOGDEBUG('Scanning blockchain for address')
+
+ # We are expecting this method to return balance
+ # and UTXO data, so we must make sure we're blocking.
+ cppWlt = Cpp.BtcWallet()
+ cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
+ TheBDM.registerWallet(cppWlt, wait=True)
+ TheBDM.scanBlockchainForTx(cppWlt, wait=True)
+
+ utxoList = cppWlt.getUnspentTxOutList()
+ bal = cppWlt.getSpendableBalance(-1, IGNOREZC)
+ return (bal, utxoList)
+ else:
+ return (-1, [])
+
+ #############################################################################
+ def unserialize(self, toUnpack):
+ """
+ We reconstruct the address from a serialized version of it. See the help
+ text for "serialize()" for information on what fields need to
+ be included and the binary mapping
+
+ We verify all checksums, correct for one byte errors, and raise exceptions
+ for bigger problems that can't be fixed.
+ """
+ if isinstance(toUnpack, BinaryUnpacker):
+ serializedData = toUnpack
+ else:
+ serializedData = BinaryUnpacker( toUnpack )
+
+
+ def chkzero(a):
+ """
+ Due to fixed-width fields, we will get lots of zero-bytes
+ even when the binary data container was empty
+ """
+ if a.count('\x00')==len(a):
+ return ''
+ else:
+ return a
+
+
+ # Start with a fresh new address
+ self.__init__()
+
+ self.addrStr20 = serializedData.get(BINARY_CHUNK, 20)
+ chkAddr20 = serializedData.get(BINARY_CHUNK, 4)
+
+ addrVerInt = serializedData.get(UINT32)
+ flags = serializedData.get(UINT64)
+ self.addrStr20 = verifyChecksum(self.addrStr20, chkAddr20)
+ flags = int_to_bitset(flags, widthBytes=8)
+
+ # Interpret the flags
+ containsPrivKey = (flags[0]=='1')
+ containsPubKey = (flags[1]=='1')
+ self.useEncryption = (flags[2]=='1')
+ self.createPrivKeyNextUnlock = (flags[3]=='1')
+
+ addrChkError = False
+ if len(self.addrStr20)==0:
+ addrChkError = True
+ if not containsPrivKey and not containsPubKey:
+ raise UnserializeError, 'Checksum mismatch in addrStr'
+
+
+
+ # Write out address-chaining parameters (for deterministic wallets)
+ self.chaincode = chkzero(serializedData.get(BINARY_CHUNK, 32))
+ chkChaincode = serializedData.get(BINARY_CHUNK, 4)
+ self.chainIndex = serializedData.get(INT64)
+ depth = serializedData.get(INT64)
+ self.createPrivKeyNextUnlock_ChainDepth = depth
+
+ # Correct errors, convert to secure container
+ self.chaincode = SecureBinaryData(verifyChecksum(self.chaincode, chkChaincode))
+
+
+ # Write out whatever is appropriate for private-key data
+ # Binary-unpacker will write all 0x00 bytes if empty values are given
+ iv = chkzero(serializedData.get(BINARY_CHUNK, 16))
+ chkIv = serializedData.get(BINARY_CHUNK, 4)
+ privKey = chkzero(serializedData.get(BINARY_CHUNK, 32))
+ chkPriv = serializedData.get(BINARY_CHUNK, 4)
+ iv = SecureBinaryData(verifyChecksum(iv, chkIv))
+ privKey = SecureBinaryData(verifyChecksum(privKey, chkPriv))
+
+ # If this is SUPPOSED to contain a private key...
+ if containsPrivKey:
+ if privKey.getSize()==0:
+ raise UnserializeError, 'Checksum mismatch in PrivateKey '+\
+ '('+hash160_to_addrStr(self.addrStr20)+')'
+
+ if self.useEncryption:
+ if iv.getSize()==0:
+ raise UnserializeError, 'Checksum mismatch in IV ' +\
+ '('+hash160_to_addrStr(self.addrStr20)+')'
+ if self.createPrivKeyNextUnlock:
+ self.createPrivKeyNextUnlock_IVandKey[0] = iv.copy()
+ self.createPrivKeyNextUnlock_IVandKey[1] = privKey.copy()
+ else:
+ self.binInitVect16 = iv.copy()
+ self.binPrivKey32_Encr = privKey.copy()
+ else:
+ self.binInitVect16 = iv.copy()
+ self.binPrivKey32_Plain = privKey.copy()
+
+ pubKey = chkzero(serializedData.get(BINARY_CHUNK, 65))
+ chkPub = serializedData.get(BINARY_CHUNK, 4)
+ pubKey = SecureBinaryData(verifyChecksum(pubKey, chkPub))
+
+ if containsPubKey:
+ if not pubKey.getSize()==65:
+ if self.binPrivKey32_Plain.getSize()==32:
+ pubKey = CryptoAES().ComputePublicKey(self.binPrivKey32_Plain)
+ else:
+ raise UnserializeError, 'Checksum mismatch in PublicKey ' +\
+ '('+hash160_to_addrStr(self.addrStr20)+')'
+
+ self.binPublicKey65 = pubKey
+
+ if addrChkError:
+ self.addrStr20 = self.binPublicKey65.getHash160()
+
+ self.timeRange[0] = serializedData.get(UINT64)
+ self.timeRange[1] = serializedData.get(UINT64)
+ self.blkRange[0] = serializedData.get(UINT32)
+ self.blkRange[1] = serializedData.get(UINT32)
+
+ self.isInitialized = True
+ return self
+
+
+
+ #############################################################################
+ # The following methods are the SIMPLE address operations that can be used
+ # to juggle address data without worrying at all about encryption details.
+ # The addresses created here can later be endowed with encryption.
+ #############################################################################
+ def createFromPrivateKey(self, privKey, pubKey=None, skipCheck=False):
+ """
+ Creates address from a user-supplied random INTEGER.
+ This method DOES perform elliptic-curve operations
+ """
+ if isinstance(privKey, str) and len(privKey)==32:
+ self.binPrivKey32_Plain = SecureBinaryData(privKey)
+ elif isinstance(privKey, int) or isinstance(privKey, long):
+ binPriv = int_to_binary(privKey, widthBytes=32, endOut=BIGENDIAN)
+ self.binPrivKey32_Plain = SecureBinaryData(binPriv)
+ else:
+ raise KeyDataError, 'Unknown private key format'
+
+ if pubKey==None:
+ self.binPublicKey65 = CryptoECDSA().ComputePublicKey(self.binPrivKey32_Plain)
+ else:
+ self.binPublicKey65 = SecureBinaryData(pubKey)
+
+ if not skipCheck:
+ assert(CryptoECDSA().CheckPubPrivKeyMatch( \
+ self.binPrivKey32_Plain, \
+ self.binPublicKey65))
+
+ self.addrStr20 = self.binPublicKey65.getHash160()
+
+ self.isInitialized = True
+ return self
+
+
+
+ #############################################################################
+ def createFromPublicKey(self, pubkey):
+ """
+ Creates address from a user-supplied ECDSA public key.
+
+ The key can be supplied as an (x,y) pair of integers, an EC_Point
+ as defined in the lisecdsa class, or as a 65-byte binary string
+ (the 64 public key bytes with a 0x04 prefix byte)
+
+ This method will fail if the supplied pair of points is not
+ on the secp256k1 curve.
+ """
+ if isinstance(pubkey, tuple) and len(pubkey)==2:
+ # We are given public-key (x,y) pair
+ binXBE = int_to_binary(pubkey[0], widthBytes=32, endOut=BIGENDIAN)
+ binYBE = int_to_binary(pubkey[1], widthBytes=32, endOut=BIGENDIAN)
+ self.binPublicKey65 = SecureBinaryData('\x04' + binXBE + binYBE)
+ if not CryptoECDSA().VerifyPublicKeyValid(self.binPublicKey65):
+ raise KeyDataError, 'Supplied public key is not on secp256k1 curve'
+ elif isinstance(pubkey, str) and len(pubkey)==65:
+ self.binPublicKey65 = SecureBinaryData(pubkey)
+ if not CryptoECDSA().VerifyPublicKeyValid(self.binPublicKey65):
+ raise KeyDataError, 'Supplied public key is not on secp256k1 curve'
+ else:
+ raise KeyDataError, 'Unknown public key format!'
+
+ # TODO: I should do a test to see which is faster:
+ # 1) Compute the hash directly like this
+ # 2) Get the string, hash it in python
+ self.addrStr20 = self.binPublicKey65.getHash160()
+ self.isInitialized = True
+ return self
+
+
+ def createFromPublicKeyHash160(self, pubkeyHash160, netbyte=ADDRBYTE):
+ """
+ Creates an address from just the 20-byte binary hash of a public key.
+
+ In binary form without a chksum, there is no protection against byte
+ errors, since there's no way to distinguish an invalid address from
+ a valid one (they both look like random data).
+
+ If you are creating an address using 20 bytes you obtained in an
+ unreliable manner (such as manually typing them in), you should
+ double-check the input before sending money using the address created
+ here -- the tx will appear valid and be accepted by the network,
+ but will be permanently tied up in the network
+ """
+ self.__init__()
+ self.addrStr20 = pubkeyHash160
+ self.isInitialized = True
+ return self
+
+ def createFromAddrStr(self, addrStr):
+ """
+ Creates an address from a Base58 address string. Since the address
+ string includes a checksum, this method will fail if there was any
+ errors entering/copying the address
+ """
+ self.__init__()
+ self.addrStr = addrStr
+ if not self.checkAddressValid():
+ raise BadAddressError, 'Invalid address string: '+addrStr
+ self.isInitialized = True
+ return self
+
+ def calculateAddrStr(self, netbyte=ADDRBYTE):
+ """
+ Forces a recalculation of the address string from the public key
+ """
+ if not self.hasPubKey():
+ raise KeyDataError, 'Cannot compute address without PublicKey'
+ keyHash = self.binPublicKey65.getHash160()
+ chksum = hash256(netbyte + keyHash)[:4]
+ return binary_to_base58(netbyte + keyHash + chksum)
+
+
+
+ def checkAddressValid(self):
+ return checkAddrStrValid(self.addrStr);
+
+
   def pprint(self, withPrivKey=True, indent=''):
      """
      Print a human-readable dump of this address object to stdout.
      Private-key fields are shown only when withPrivKey=True and a
      private key is present; absent fields print as a run of dashes.
      """
      def pp(x, nchar=1000):
         # Render a key/IV container as hex, or dashes when it is empty
         if x.getSize()==0:
            return '--'*32
         else:
            return x.toHexStr()[:nchar]
      print indent + 'BTC Address      :', self.getAddrStr()
      print indent + 'Hash160[BE]      :', binary_to_hex(self.getAddr160())
      print indent + 'Wallet Location  :', self.walletByteLoc
      print indent + 'Chained Address  :', self.chainIndex >= -1
      print indent + 'Have (priv,pub)  : (%s,%s)' % \
                     (str(self.hasPrivKey()), str(self.hasPubKey()))
      print indent + 'First/Last Time  : (%s,%s)' % \
                     (str(self.timeRange[0]), str(self.timeRange[1]))
      print indent + 'First/Last Block : (%s,%s)' % \
                     (str(self.blkRange[0]), str(self.blkRange[1]))
      if self.hasPubKey():
         print indent + 'PubKeyX(BE)      :', \
                        binary_to_hex(self.binPublicKey65.toBinStr()[1:33 ])
         print indent + 'PubKeyY(BE)      :', \
                        binary_to_hex(self.binPublicKey65.toBinStr()[ 33:])
      print indent + 'Encryption parameters:'
      print indent + '   UseEncryption :', self.useEncryption
      print indent + '   IsLocked      :', self.isLocked
      print indent + '   KeyChanged    :', self.keyChanged
      print indent + '   ChainIndex    :', self.chainIndex
      print indent + '   Chaincode     :', pp(self.chaincode)
      print indent + '   InitVector    :', pp(self.binInitVect16)
      if withPrivKey and self.hasPrivKey():
         print indent + 'PrivKeyPlain(BE) :', pp(self.binPrivKey32_Plain)
         print indent + 'PrivKeyCiphr(BE) :', pp(self.binPrivKey32_Encr)
      else:
         print indent + 'PrivKeyPlain(BE) :', pp(SecureBinaryData())
         print indent + 'PrivKeyCiphr(BE) :', pp(SecureBinaryData())
      if self.createPrivKeyNextUnlock:
         print indent + '             ***** :', 'PrivKeys available on next unlock'
+
   def toString(self, withPrivKey=True, indent=''):
      """
      Return the same human-readable dump produced by pprint(), but as a
      single newline-joined string instead of printing to stdout.
      """
      def pp(x, nchar=1000):
         # Render a key/IV container as hex, or dashes when it is empty
         if x.getSize()==0:
            return '--'*32
         else:
            return x.toHexStr()[:nchar]
      result = ''.join([indent + 'BTC Address      :', self.getAddrStr()])
      result = ''.join([result, '\n', indent + 'Hash160[BE]      :', binary_to_hex(self.getAddr160())])
      result = ''.join([result, '\n', indent + 'Wallet Location  :', str(self.walletByteLoc)])
      result = ''.join([result, '\n', indent + 'Chained Address  :', str(self.chainIndex >= -1)])
      result = ''.join([result, '\n', indent + 'Have (priv,pub)  : (%s,%s)' % \
                     (str(self.hasPrivKey()), str(self.hasPubKey()))])
      result = ''.join([result, '\n', indent + 'First/Last Time  : (%s,%s)' % \
                     (str(self.timeRange[0]), str(self.timeRange[1]))])
      result = ''.join([result, '\n', indent + 'First/Last Block : (%s,%s)' % \
                     (str(self.blkRange[0]), str(self.blkRange[1]))])
      if self.hasPubKey():
         result = ''.join([result, '\n', indent + 'PubKeyX(BE)      :', \
                        binary_to_hex(self.binPublicKey65.toBinStr()[1:33 ])])
         result = ''.join([result, '\n', indent + 'PubKeyY(BE)      :', \
                        binary_to_hex(self.binPublicKey65.toBinStr()[ 33:])])
      result = ''.join([result, '\n', indent + 'Encryption parameters:'])
      result = ''.join([result, '\n', indent + '   UseEncryption :', str(self.useEncryption)])
      result = ''.join([result, '\n', indent + '   IsLocked      :', str(self.isLocked)])
      result = ''.join([result, '\n', indent + '   KeyChanged    :', str(self.keyChanged)])
      result = ''.join([result, '\n', indent + '   ChainIndex    :', str(self.chainIndex)])
      result = ''.join([result, '\n', indent + '   Chaincode     :', pp(self.chaincode)])
      result = ''.join([result, '\n', indent + '   InitVector    :', pp(self.binInitVect16)])
      if withPrivKey and self.hasPrivKey():
         result = ''.join([result, '\n', indent + 'PrivKeyPlain(BE) :', pp(self.binPrivKey32_Plain)])
         result = ''.join([result, '\n', indent + 'PrivKeyCiphr(BE) :', pp(self.binPrivKey32_Encr)])
      else:
         result = ''.join([result, '\n', indent + 'PrivKeyPlain(BE) :', pp(SecureBinaryData())])
         result = ''.join([result, '\n', indent + 'PrivKeyCiphr(BE) :', pp(SecureBinaryData())])
      if self.createPrivKeyNextUnlock:
         result = ''.join([result, '\n', indent + '             ***** :', 'PrivKeys available on next unlock'])
      return result
+
+# Put the import at the end to avoid circular reference problem
+from armoryengine.BDM import *
diff --git a/armoryengine/PyBtcWallet.py b/armoryengine/PyBtcWallet.py
new file mode 100644
index 000000000..23593f349
--- /dev/null
+++ b/armoryengine/PyBtcWallet.py
@@ -0,0 +1,2918 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+import os.path
+import shutil
+
+from CppBlockUtils import SecureBinaryData, KdfRomix, CryptoAES, CryptoECDSA
+import CppBlockUtils as Cpp
+from armoryengine.ArmoryUtils import *
+from armoryengine.BinaryPacker import *
+from armoryengine.BinaryUnpacker import *
+from armoryengine.Timer import *
+# This import is causing a circular import problem when used by findpass and promokit
+# it is imported at the end of the file. Do not add it back at the begining
+# from armoryengine.Transaction import *
+
+
# Blockchain-access modes for a wallet (see setBlockchainSyncFlag /
# doBlockchainSync); DONOTUSE disables syncing entirely.
BLOCKCHAIN_READONLY = 0
BLOCKCHAIN_READWRITE = 1
BLOCKCHAIN_DONOTUSE = 2

# Wallet-file update actions (add a new record vs. modify one in place)
WLT_UPDATE_ADD = 0
WLT_UPDATE_MODIFY = 1

# Entry-type codes for records in the wallet file (key data, per-address
# comments, per-tx comments, OP_EVAL scripts, deleted/blanked entries)
WLT_DATATYPE_KEYDATA = 0
WLT_DATATYPE_ADDRCOMMENT = 1
WLT_DATATYPE_TXCOMMENT = 2
WLT_DATATYPE_OPEVAL = 3
WLT_DATATYPE_DELETED = 4

# Defaults for key-stretching (KDF) calibration: target ~0.25s of compute
# and at most 32 MiB of memory -- presumably consumed by the KdfRomix
# setup elsewhere in this module; confirm against the KDF-creation code.
DEFAULT_COMPUTE_TIME_TARGET = 0.25
DEFAULT_MAXMEM_LIMIT = 32*1024*1024
+
+class PyBtcWallet(object):
+ """
+ This class encapsulates all the concepts and variables in a "wallet",
+ and maintains the passphrase protection, key stretching, encryption,
+ etc, required to maintain the wallet. This class also includes the
+ file I/O methods for storing and loading wallets.
+
+ ***NOTE: I have ONLY implemented deterministic wallets, using ECDSA
+ Diffie-Hellman shared-secret crypto operations. This allows
+ one to actually determine the next PUBLIC KEY in the address
+ chain without actually having access to the private keys.
+ This makes it possible to synchronize online-offline computers
+ once and never again.
+
+ You can import random keys into your wallet, but if it is
+ encrypted, you will have to supply a passphrase to make sure
+ it can be encrypted as well.
+
+ Presumably, wallets will be used for one of three purposes:
+
+ (1) Spend money and receive payments
+ (2) Watching-only wallets - have the private keys, just not on this computer
+ (3) May be watching *other* people's addrs. There's a variety of reasons
+ we might want to watch other peoples' addresses, but most them are not
+ relevant to a "basic" BTC user. Nonetheless it should be supported to
+ watch money without considering it part of our own assets
+
+ This class is included in the combined-python-cpp module, because we really
+ need to maintain a persistent Cpp.BtcWallet if this class is to be useful
+ (we don't want to have to rescan the entire blockchain every time we do any
+ wallet operations).
+
+ The file format was designed from the outset with lots of unused space to
+ allow for expansion without having to redefine the file format and break
+ previous wallets. Luckily, wallet information is cheap, so we don't have
+ to stress too much about saving space (100,000 addresses should take 15 MB)
+
+ This file is NOT for storing Tx-related information. I want this file to
+ be the minimal amount of information you need to secure and backup your
+ entire wallet. Tx information can always be recovered from examining the
+ blockchain... your private keys cannot be.
+
+ We track version numbers, just in case. We start with 1.0
+
+ Version 1.0:
+ ---
+ fileID -- (8) '\xbaWALLET\x00' for wallet files
+ version -- (4) getVersionInt(PYBTCWALLET_VERSION)
+ magic bytes -- (4) defines the blockchain for this wallet (BTC, NMC)
+ wlt flags -- (8) 64 bits/flags representing info about wallet
+ binUniqueID -- (6) first 5 bytes of first address in wallet
+ (rootAddr25Bytes[:5][::-1]), reversed
+ This is not intended to look like the root addr str
+ and is reversed to avoid having all wallet IDs start
+ with the same characters (since the network byte is front)
+ create date -- (8) unix timestamp of when this wallet was created
+ (actually, the earliest creation date of any addr
+ in this wallet -- in the case of importing addr
+ data). This is used to improve blockchain searching
+ Short Name -- (32) Null-terminated user-supplied short name for wlt
+ Long Name -- (256) Null-terminated user-supplied description for wlt
+ Highest Used-- (8) The chain index of the highest used address
+ ---
+ Crypto/KDF -- (512) information identifying the types and parameters
+ of encryption used to secure wallet, and key
+ stretching used to secure your passphrase.
+ Includes salt. (the breakdown of this field will
+ be described separately)
+ KeyGenerator-- (237) The base address for a determinstic wallet.
+ Just a serialized PyBtcAddress object.
+ ---
+ UNUSED -- (1024) unused space for future expansion of wallet file
+ ---
+ Remainder of file is for key storage and various other things. Each
+ "entry" will start with a 4-byte code identifying the entry type, then
+ 20 bytes identifying what address the data is for, and finally then
+ the subsequent data . So far, I have three types of entries that can
+ be included:
+
+ \x01 -- Address/Key data (as of PyBtcAddress version 1.0, 237 bytes)
+ \x02 -- Address comments (variable-width field)
+ \x03 -- Address comments (variable-width field)
+ \x04 -- OP_EVAL subscript (when this is enabled, in the future)
+
+ Please see PyBtcAddress for information on how key data is serialized.
+ Comments (\x02) are var-width, and if a comment is changed to
+ something longer than the existing one, we'll just blank out the old
+ one and append a new one to the end of the file. It looks like
+
+ 02000000 01 4f This comment is enabled (01) with 4f characters
+
+
+ For file syncing, we protect against corrupted wallets by doing atomic
+ operations before even telling the user that new data has been added.
+ We do this by copying the wallet file, and creating a walletUpdateFailed
+ file. We then modify the original, verify its integrity, and then delete
+ the walletUpdateFailed file. Then we create a backupUpdateFailed flag,
+ do the identical update on the backup file, and delete the failed flag.
+ This guaranatees that no matter which nanosecond the power goes out,
+ there will be an uncorrupted wallet and we know which one it is.
+
+ We never let the user see any data until the atomic write-to-file operation
+ has completed
+
+
+ Additionally, we implement key locking and unlocking, with timeout. These
+ key locking features are only DEFINED here, not actually enforced (because
+ this is a library, not an application). You can set the default/temporary
+ time that the KDF key is maintained in memory after the passphrase is
+ entered, and this class will keep track of when the wallet should be next
+ locked. It is up to the application to check whether the current time
+ exceeds the lock time. This will probably be done in a kind of heartbeat
+ method, which checks every few seconds for all sorts of things -- including
+ wallet locking.
+ """
+
+ #############################################################################
   def __init__(self):
      """
      Initialize an empty, unloaded wallet.  All fields are placeholders
      until the wallet is populated by a create/read method elsewhere in
      this class.
      """
      self.fileTypeStr = '\xbaWALLET\x00'
      self.magicBytes = MAGIC_BYTES
      self.version = PYBTCWALLET_VERSION  # (Major, Minor, Minor++, even-more-minor)
      self.eofByte = 0
      self.cppWallet = None   # Mirror of PyBtcWallet in C++ object
      self.cppInfo = {}     # Extra info about each address to help sync
      self.watchingOnly = False
      self.wltCreateDate = 0

      # Three dictionaries hold all data
      self.addrMap = {}  # maps 20-byte addresses to PyBtcAddress objects
      self.commentsMap = {}  # maps 20-byte addresses to user-created comments
      self.commentLocs = {}  # map comment keys to wallet file locations
      self.opevalMap = {}  # maps 20-byte addresses to OP_EVAL data (future)
      self.labelName = ''
      self.labelDescr = ''
      self.linearAddr160List = []
      self.chainIndexMap = {}
      self.txAddrMap = {}    # cache for getting tx-labels based on addr search
      if USE_TESTNET:
         self.addrPoolSize = 10   # this makes debugging so much easier!
      else:
         self.addrPoolSize = CLI_OPTIONS.keypool

      # For file sync features
      self.walletPath = ''
      self.doBlockchainSync = BLOCKCHAIN_READONLY
      self.lastSyncBlockNum = 0

      # Private key encryption details
      self.useEncryption = False
      self.kdf = None
      self.crypto = None
      self.kdfKey = None
      self.defaultKeyLifetime = 10    # seconds after unlock, that key is discarded
      self.lockWalletAtTime = 0     # seconds after unlock, that key is discarded
      self.isLocked = False
      self.testedComputeTime=None

      # Deterministic wallet, need a root key.  Though we can still import keys.
      # The unique ID contains the network byte (id[-1]) but is not intended to
      # resemble the address of the root key
      self.uniqueIDBin = ''
      self.uniqueIDB58 = ''   # Base58 version of reversed-uniqueIDBin
      self.lastComputedChainAddr160 = ''
      self.lastComputedChainIndex = 0
      self.highestUsedChainIndex = 0

      # All PyBtcAddress serializations are exact same size, figure it out now
      self.pybtcaddrSize = len(PyBtcAddress().serialize())


      # All BDM calls by default go on the multi-thread-queue.  But if the BDM
      # is the one calling the PyBtcWallet methods, it will deadlock if it uses
      # the queue.  Therefore, the BDM will set this flag before making any
      # calls, which will tell PyBtcWallet to use __direct methods.
      self.calledFromBDM = False

      # Finally, a bunch of offsets that tell us where data is stored in the
      # file: this can be generated automatically on unpacking (meaning it
      # doesn't require manually updating offsets if I change the format), and
      # will save us a couple lines of code later, when we need to update things
      self.offsetWltFlags = -1
      self.offsetLabelName = -1
      self.offsetLabelDescr = -1
      self.offsetTopUsed = -1
      self.offsetRootAddr = -1
      self.offsetKdfParams = -1
      self.offsetCrypto = -1

      # These flags are ONLY for unit-testing the walletFileSafeUpdate function
      self.interruptTest1 = False
      self.interruptTest2 = False
      self.interruptTest3 = False

      #flags the wallet if it has off chain imports (from a consistency repair)
      self.hasNegativeImports = False
+
+
+ #############################################################################
+ def getWalletVersion(self):
+ return (getVersionInt(self.version), getVersionString(self.version))
+
+ #############################################################################
+ def getTimeRangeForAddress(self, addr160):
+ if not self.addrMap.has_key(addr160):
+ return None
+ else:
+ return self.addrMap[addr160].getTimeRange()
+
+ #############################################################################
+ def getBlockRangeForAddress(self, addr160):
+ if not self.addrMap.has_key(addr160):
+ return None
+ else:
+ return self.addrMap[addr160].getBlockRange()
+
+ #############################################################################
+ def setBlockchainSyncFlag(self, syncYes=True):
+ self.doBlockchainSync = syncYes
+
+ #############################################################################
   @TimeThisFunction
   def syncWithBlockchain(self, startBlk=None):
      """
      Will block until getTopBlockHeader() returns, which could be a while.
      If you don't want to wait, check TheBDM.getBDMState()=='BlockchainReady'
      before calling this method.  If you expect the blockchain will have to
      be rescanned, then call TheBDM.rescanBlockchain or TheBDM.loadBlockchain

      If this method is called from the BDM itself, calledFromBDM will signal
      to use the BDM methods directly, not the queue.  This will deadlock
      otherwise.
      """
      if TheBDM.getBDMState() in ('Offline', 'Uninitialized'):
         LOGWARN('Called syncWithBlockchain but BDM is %s', TheBDM.getBDMState())
         return

      if not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
         if startBlk==None:
            # NOTE(review): unlike syncWithBlockchainLite, no None-check on
            # lastSyncBlockNum here -- presumably always an int by this point;
            # confirm before relying on it.
            startBlk = self.lastSyncBlockNum + 1

         # calledFromBDM means that ultimately the BDM itself called this
         # method and is blocking waiting for it.  So we can't use the
         # BDM-thread queue, must call its methods directly
         if self.calledFromBDM:
            TheBDM.scanBlockchainForTx_bdm_direct(self.cppWallet, startBlk)
            self.lastSyncBlockNum = TheBDM.getTopBlockHeight_bdm_direct()
         else:
            TheBDM.scanBlockchainForTx(self.cppWallet, startBlk, wait=True)
            self.lastSyncBlockNum = TheBDM.getTopBlockHeight(wait=True)
      else:
         LOGERROR('Blockchain-sync requested, but current wallet')
         LOGERROR('is set to BLOCKCHAIN_DONOTUSE')
+
+ #############################################################################
+   @TimeThisFunction
+   def syncWithBlockchainLite(self, startBlk=None):
+      """
+      This is just like a regular sync, but it won't rescan the whole blockchain
+      if the wallet is dirty -- if addresses were imported recently, it will
+      still only scan what the blockchain picked up on the last scan. Use the
+      non-lite version to allow a full scan.
+      """
+
+      if TheBDM.getBDMState() in ('Offline', 'Uninitialized'):
+         LOGWARN('Called syncWithBlockchainLite but BDM is %s', TheBDM.getBDMState())
+         return
+
+      if not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
+         if startBlk==None:
+            if self.lastSyncBlockNum is not None:
+               startBlk = self.lastSyncBlockNum + 1
+
+         # calledFromBDM means that ultimately the BDM itself called this
+         # method and is blocking waiting for it.  So we can't use the
+         # BDM-thread queue, must call its methods directly
+
+         if self.calledFromBDM:
+            TheBDM.scanRegisteredTxForWallet_bdm_direct(self.cppWallet, startBlk)
+            self.lastSyncBlockNum = TheBDM.getTopBlockHeight_bdm_direct()
+         else:
+            TheBDM.scanRegisteredTxForWallet(self.cppWallet, startBlk, wait=True)
+            self.lastSyncBlockNum = TheBDM.getTopBlockHeight(wait=True)
+
+         # Refresh the txHash -> [addr160, ...] map used for comment lookups
+         wltLE = self.cppWallet.getTxLedgerForComments()
+         for le in wltLE:
+            txHash = le.getTxHash()
+            if not self.txAddrMap.has_key(txHash):
+               self.txAddrMap[txHash] = []
+            scrAddr = SecureBinaryData(le.getScrAddr())
+            try:
+               addrStr = scrAddr_to_addrStr(scrAddr.toBinStr())
+               addr160 = addrStr_to_hash160(addrStr)[1]
+               if addr160 not in self.txAddrMap[txHash]:
+                  self.txAddrMap[txHash].append(addr160)
+            except:
+               # scrAddrs that don't convert to a standard address are skipped
+               continue
+      else:
+         LOGERROR('Blockchain-sync requested, but current wallet')
+         LOGERROR('is set to BLOCKCHAIN_DONOTUSE')
+
+ #############################################################################
+   def getCommentForAddrBookEntry(self, abe):
+      # Return the first non-empty comment for this address-book entry:
+      # the address comment if set, else the first commented tx in its list.
+      comment = self.getComment(abe.getAddr160())
+      if len(comment)>0:
+         return comment
+
+      # SWIG BUG!
+      # http://sourceforge.net/tracker/?func=detail&atid=101645&aid=3403085&group_id=1645
+      # Apparently, using the -threads option when compiling the swig module
+      # causes the "for i in vector<...>:" mechanic to sometimes throw seg faults!
+      # For this reason, this method was replaced with the one below:
+      # NOTE(review): despite the comment above, the vector iteration is
+      # still present here -- confirm whether this loop is safe/intended.
+      for regTx in abe.getTxList():
+         comment = self.getComment(regTx.getTxHash())
+         if len(comment)>0:
+            return comment
+
+      return ''
+
+ #############################################################################
+   def getCommentForTxList(self, a160, txhashList):
+      # Like getCommentForAddrBookEntry, but takes a plain list of tx hashes
+      # (avoids iterating a SWIG vector).  Returns '' if nothing is commented.
+      comment = self.getComment(a160)
+      if len(comment)>0:
+         return comment
+
+      for txHash in txhashList:
+         comment = self.getComment(txHash)
+         if len(comment)>0:
+            return comment
+
+      return ''
+
+ #############################################################################
+   def printAddressBook(self):
+      # Debugging helper: dump each address-book entry (address, tx count,
+      # and the big-endian hash of every tx) to stdout.  Python-2 prints.
+      addrbook = self.cppWallet.createAddressBook()
+      for abe in addrbook:
+         print hash160_to_addrStr(abe.getAddr160()),
+         txlist = abe.getTxList()
+         print len(txlist)
+         for rtx in txlist:
+            print '\t', binary_to_hex(rtx.getTxHash(), BIGENDIAN)
+
+ #############################################################################
+   def hasAnyImported(self):
+      # True if any address was imported rather than derived from the chain
+      # (chainIndex == -2 flags an imported, non-chained key).
+      for a160,addr in self.addrMap.iteritems():
+         if addr.chainIndex == -2:
+            return True
+      return False
+
+
+ #############################################################################
+ # The IGNOREZC args on the get*Balance calls determine whether unconfirmed
+ # change (sent-to-self) will be considered spendable or unconfirmed. This
+ # was added after the malleability issues cropped up in Feb 2014. Zero-conf
+ # change was always deprioritized, but using --nospendzeroconfchange makes
+ # it totally unspendable
+   def getBalance(self, balType="Spendable"):
+      """
+      Return the wallet balance in satoshis for the requested balance type
+      ('Spendable', 'Unconfirmed', or 'Total'/'Full'/'Unspent'/'Ultimate').
+      Returns -1 if the blockchain is not available.  See the IGNOREZC note
+      above regarding zero-conf change.
+      """
+      if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
+         return -1
+      else:
+         currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
+         if balType.lower() in ('spendable','spend'):
+            return self.cppWallet.getSpendableBalance(currBlk, IGNOREZC)
+         elif balType.lower() in ('unconfirmed','unconf'):
+            return self.cppWallet.getUnconfirmedBalance(currBlk, IGNOREZC)
+         elif balType.lower() in ('total','ultimate','unspent','full'):
+            return self.cppWallet.getFullBalance()
+         else:
+            raise TypeError('Unknown balance type! "' + balType + '"')
+
+
+ #############################################################################
+ def getAddrBalance(self, addr160, balType="Spendable", currBlk=UINT32_MAX):
+ if (not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM) or \
+ not self.hasAddr(addr160):
+ return -1
+ else:
+ addr = self.cppWallet.getScrAddrObjByKey(Hash160ToScrAddr(addr160))
+ if balType.lower() in ('spendable','spend'):
+ return addr.getSpendableBalance(currBlk, IGNOREZC)
+ elif balType.lower() in ('unconfirmed','unconf'):
+ return addr.getUnconfirmedBalance(currBlk, IGNOREZC)
+ elif balType.lower() in ('ultimate','unspent','full'):
+ return addr.getFullBalance()
+ else:
+ raise TypeError('Unknown balance type!')
+
+ #############################################################################
+   def getTxLedger(self, ledgType='Full'):
+      """
+      Gets the ledger entries for the entire wallet, from C++/SWIG data structs
+
+      ledgType: 'Full' (confirmed + zero-conf), 'Blockchain' (confirmed only),
+                or 'ZeroConf' (unconfirmed only).
+      Returns [] if the blockchain is not available.
+      """
+      if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
+         return []
+      else:
+         ledgBlkChain = self.cppWallet.getTxLedger()
+         ledgZeroConf = self.cppWallet.getZeroConfLedger()
+         if ledgType.lower() in ('full','all','ultimate'):
+            ledg = []
+            ledg.extend(ledgBlkChain)
+            ledg.extend(ledgZeroConf)
+            return ledg
+         elif ledgType.lower() in ('blk', 'blkchain', 'blockchain'):
+            return ledgBlkChain
+         elif ledgType.lower() in ('zeroconf', 'zero'):
+            return ledgZeroConf
+         else:
+            raise TypeError('Unknown ledger type! "' + ledgType + '"')
+
+
+
+
+ #############################################################################
+ def getAddrTxLedger(self, addr160, ledgType='Full'):
+ """
+ Gets the ledger entries for the entire wallet, from C++/SWIG data structs
+ """
+ if (not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM) or \
+ not self.hasAddr(addr160):
+ return []
+ else:
+ scrAddr = Hash160ToScrAddr(addr160)
+ ledgBlkChain = self.cppWallet.getScrAddrObjByKey(scrAddr).getTxLedger()
+ ledgZeroConf = self.cppWallet.getScrAddrObjByKey(scrAddr).getZeroConfLedger()
+ if ledgType.lower() in ('full','all','ultimate'):
+ ledg = []
+ ledg.extend(ledgBlkChain)
+ ledg.extend(ledgZeroConf)
+ return ledg
+ elif ledgType.lower() in ('blk', 'blkchain', 'blockchain'):
+ return ledgBlkChain
+ elif ledgType.lower() in ('zeroconf', 'zero'):
+ return ledgZeroConf
+ else:
+ raise TypeError('Unknown balance type! "' + ledgType + '"')
+
+
+ #############################################################################
+ def getTxOutList(self, txType='Spendable'):
+ """ Returns UnspentTxOut/C++ objects """
+ if TheBDM.getBDMState()=='BlockchainReady' and \
+ not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
+
+ currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
+ self.syncWithBlockchain()
+ if txType.lower() in ('spend', 'spendable'):
+ return self.cppWallet.getSpendableTxOutList(currBlk, IGNOREZC);
+ elif txType.lower() in ('full', 'all', 'unspent', 'ultimate'):
+ return self.cppWallet.getFullTxOutList(currBlk);
+ else:
+ raise TypeError('Unknown balance type! ' + txType)
+ else:
+ LOGERROR('***Blockchain is not available for accessing wallet-tx data')
+ return []
+
+ #############################################################################
+   def getAddrTxOutList(self, addr160, txType='Spendable'):
+      """ Returns UnspentTxOut/C++ objects for a single wallet address.
+
+      txType: 'Spendable' (respects IGNOREZC) or 'Full'/'All'/'Unspent'/
+      'Ultimate'.  Returns [] (with an error logged) if the blockchain is
+      unavailable, the address is not ours, or sync is disabled.
+      """
+      if TheBDM.getBDMState()=='BlockchainReady' and \
+         self.hasAddr(addr160) and \
+         not self.doBlockchainSync==BLOCKCHAIN_DONOTUSE:
+
+         currBlk = TheBDM.getTopBlockHeight(calledFromBDM=self.calledFromBDM)
+         self.syncWithBlockchain()
+         scrAddrStr = Hash160ToScrAddr(addr160)
+         cppAddr = self.cppWallet.getScrAddrObjByKey(scrAddrStr)
+         if txType.lower() in ('spend', 'spendable'):
+            return cppAddr.getSpendableTxOutList(currBlk, IGNOREZC);
+         elif txType.lower() in ('full', 'all', 'unspent', 'ultimate'):
+            return cppAddr.getFullTxOutList(currBlk);
+         else:
+            raise TypeError('Unknown TxOutList type! ' + txType)
+      else:
+         LOGERROR('***Blockchain is not available for accessing wallet-tx data')
+         return []
+
+
+ #############################################################################
+   def getAddrByHash160(self, addr160):
+      # Return the PyBtcAddress for this hash160, or None if not in wallet
+      return (None if not self.hasAddr(addr160) else self.addrMap[addr160])
+
+ #############################################################################
+   def hasAddr(self, addrData):
+      # Membership test accepting a raw 20-byte hash160, a Base58 address
+      # string, or a PyBtcAddress object.  Anything else returns False.
+      if isinstance(addrData, str):
+         if len(addrData) == 20:
+            return self.addrMap.has_key(addrData)
+         elif isLikelyDataType(addrData)==DATATYPE.Base58:
+            return self.addrMap.has_key(addrStr_to_hash160(addrData)[1])
+         else:
+            return False
+      elif isinstance(addrData, PyBtcAddress):
+         return self.addrMap.has_key(addrData.getAddr160())
+      else:
+         return False
+
+
+ #############################################################################
+   def setDefaultKeyLifetime(self, newlifetime):
+      """ Set a new default lifetime for holding the unlock key. Min 2 sec """
+      # Clamp to >= 2 seconds so the wallet can't relock instantly
+      self.defaultKeyLifetime = max(newlifetime, 2)
+
+ #############################################################################
+   def checkWalletLockTimeout(self):
+      # If the wallet is unlocked and its lock deadline has passed, relock it
+      # and destroy the in-memory KDF key.  Intended to be polled periodically.
+      if not self.isLocked and self.kdfKey and RightNow()>self.lockWalletAtTime:
+         self.lock()
+         if self.kdfKey:
+            self.kdfKey.destroy()
+         self.kdfKey = None
+
+         if self.useEncryption:
+            self.isLocked = True
+
+
+ #############################################################################
+   def lockTxOutsOnNewTx(self, pytxObj):
+      # Mark every outpoint spent by this (not-yet-confirmed) tx as locked in
+      # the C++ wallet so it won't be selected again for another spend.
+      for txin in pytxObj.inputs:
+         self.cppWallet.lockTxOutSwig(txin.outpoint.txHash, \
+                                      txin.outpoint.txOutIndex)
+
+
+ #############################################################################
+ # THIS WAS CREATED ORIGINALLY TO SUPPORT BITSAFE INTEGRATION INTO ARMORY
+ # But it's also a good first step into general BIP 32 support
+   def getChildExtPubFromRoot(self, i):
+      # Derive the i'th child extended PUBLIC key from this wallet's root
+      # public key + chaincode (no private keys involved).
+      root = self.addrMap['ROOT']
+      ekey = ExtendedKey().CreateFromPublic(root.binPublicKey65, root.chaincode)
+      newKey = HDWalletCrypto().ChildKeyDeriv(ekey, i)
+      newKey.setIndex(i)
+      return newKey
+      #newAddr = PyBtcAddress().createFromExtendedPublicKey(newKey)
+
+ #############################################################################
+ #def createFromExtendedPublicKey(self, ekey):
+ #pub65 = ekey.getPub()
+ #chain = ekey.getChain()
+ #newAddr = self.createFromPublicKeyData(pub65, chain)
+ #newAddr.chainIndex = newAddr.getIndex()
+ #return newAddr
+
+ #############################################################################
+ #def deriveChildPublicKey(self, i):
+ #newKey = HDWalletCrypto().ChildKeyDeriv(self.getExtendedPublicKey(), i)
+ #newAddr = PyBtcAddress().createFromExtendedPublicKey(newKey)
+
+ #############################################################################
+ # Copy the wallet file to backup
+   def backupWalletFile(self, backupPath = None):
+      # Copy the wallet file to backupPath, or to the wallet's standard
+      # 'backup' path when no destination is given.
+      walletFileBackup = self.getWalletPath('backup') if backupPath == None \
+                                                                   else backupPath
+      shutil.copy(self.walletPath, walletFileBackup)
+
+ #############################################################################
+ # THIS WAS CREATED ORIGINALLY TO SUPPORT BITSAFE INTEGRATION INTO ARMORY
+ # But it's also a good first step into general BIP 32 support
+ def createWalletFromMasterPubKey(self, masterHex, \
+ isActuallyNew=True, \
+ doRegisterWithBDM=True):
+ # This function eats hex inputs, not sure why I chose to do that...
+ p0 = masterHex.index('4104') + 2
+ pubkey = SecureBinaryData(hex_to_binary(masterHex[p0:p0+130]))
+ c0 = masterHex.index('1220') + 4
+ chain = SecureBinaryData(hex_to_binary(masterHex[c0:c0+64]))
+
+ # Create the root address object
+ rootAddr = PyBtcAddress().createFromPublicKeyData( pubkey )
+ rootAddr.markAsRootAddr(chain)
+ self.addrMap['ROOT'] = rootAddr
+
+ ekey = self.getChildExtPubFromRoot(0)
+ firstAddr = PyBtcAddress().createFromPublicKeyData(ekey.getPub())
+ firstAddr.chaincode = ekey.getChain()
+ firstAddr.chainIndex = 0
+ first160 = firstAddr.getAddr160()
+
+ # Update wallet object with the new data
+ # NEW IN WALLET VERSION 1.35: unique ID is now based on
+ # the first chained address: this guarantees that the unique ID
+ # is based not only on the private key, BUT ALSO THE CHAIN CODE
+ self.useEncryption = False
+ self.addrMap[firstAddr.getAddr160()] = firstAddr
+ self.uniqueIDBin = (ADDRBYTE + firstAddr.getAddr160()[:5])[::-1]
+ self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
+ self.labelName = 'BitSafe Demo Wallet'
+ self.labelDescr = 'We\'ll be lucky if this works!'
+ self.lastComputedChainAddr160 = first160
+ self.lastComputedChainIndex = firstAddr.chainIndex
+ self.highestUsedChainIndex = firstAddr.chainIndex-1
+ self.wltCreateDate = long(RightNow())
+ self.linearAddr160List = [first160]
+ self.chainIndexMap[firstAddr.chainIndex] = first160
+ self.watchingOnly = True
+
+ # We don't have to worry about atomic file operations when
+ # creating the wallet: so we just do it naively here.
+ newWalletFilePath = os.path.join(ARMORY_HOME_DIR, 'bitsafe_demo_%s.wallet' % self.uniqueIDB58)
+ self.walletPath = newWalletFilePath
+ if not newWalletFilePath:
+ shortName = self.labelName .replace(' ','_')
+ # This was really only needed when we were putting name in filename
+ #for c in ',?;:\'"?/\\=+-|[]{}<>':
+ #shortName = shortName.replace(c,'_')
+ newName = 'armory_%s_.wallet' % self.uniqueIDB58
+ self.walletPath = os.path.join(ARMORY_HOME_DIR, newName)
+
+ LOGINFO(' New wallet will be written to: %s', self.walletPath)
+ newfile = open(self.walletPath, 'wb')
+ fileData = BinaryPacker()
+
+ # packHeader method writes KDF params and root address
+ headerBytes = self.packHeader(fileData)
+
+ # We make sure we have byte locations of the two addresses, to start
+ self.addrMap[first160].walletByteLoc = headerBytes + 21
+
+ fileData.put(BINARY_CHUNK, '\x00' + first160 + firstAddr.serialize())
+
+
+ # Store the current localtime and blocknumber. Block number is always
+ # accurate if available, but time may not be exactly right. Whenever
+ # basing anything on time, please assume that it is up to one day off!
+ time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
+
+ # Don't forget to sync the C++ wallet object
+ self.cppWallet = Cpp.BtcWallet()
+ self.cppWallet.addAddress_5_(rootAddr.getAddr160(), time0,blk0,time0,blk0)
+ self.cppWallet.addAddress_5_(first160, time0,blk0,time0,blk0)
+
+ # We might be holding the wallet temporarily and not ready to register it
+ if doRegisterWithBDM:
+ TheBDM.registerWallet(self.cppWallet, isFresh=isActuallyNew) # new wallet
+
+ newfile.write(fileData.getBinaryString())
+ newfile.close()
+
+ walletFileBackup = self.getWalletPath('backup')
+ shutil.copy(self.walletPath, walletFileBackup)
+
+
+ # Let's fill the address pool while we are unlocked
+ # It will get a lot more expensive if we do it on the next unlock
+ if doRegisterWithBDM:
+ self.fillAddressPool(self.addrPoolSize, isActuallyNew=isActuallyNew)
+
+ return self
+
+
+
+
+ #############################################################################
+   def createNewWallet(self, newWalletFilePath=None, \
+                             plainRootKey=None, chaincode=None, \
+                             withEncrypt=True, IV=None, securePassphrase=None, \
+                             kdfTargSec=DEFAULT_COMPUTE_TIME_TARGET, \
+                             kdfMaxMem=DEFAULT_MAXMEM_LIMIT, \
+                             shortLabel='', longLabel='', isActuallyNew=True, \
+                             doRegisterWithBDM=True, skipBackupFile=False, \
+                             extraEntropy=None, Progress=emptyFunc):
+      """
+      This method will create a new wallet, using as much customizability
+      as you want.  You can enable encryption, and set the target params
+      of the key-derivation function (compute-time and max memory usage).
+      The KDF parameters will be experimentally determined to be as hard
+      as possible for your computer within the specified time target
+      (default, 0.25s).  It will aim for maximizing memory usage and using
+      only 1 or 2 iterations of it, but this can be changed by scaling
+      down the kdfMaxMem parameter (default 32 MB).
+
+      If you use encryption, don't forget to supply a 32-byte passphrase,
+      created via SecureBinaryData(pythonStr).  This method will apply
+      the passphrase so that the wallet is "born" encrypted.
+
+      The field plainRootKey could be used to recover a written backup
+      of a wallet, since all addresses are deterministically computed
+      from the root address.  This obviously won't reocver any imported
+      keys, but does mean that you can recover your ENTIRE WALLET from
+      only those 32 plaintext bytes AND the 32-byte chaincode.
+
+      We skip the atomic file operations since we don't even have
+      a wallet file yet to safely update.
+
+      DO NOT CALL THIS FROM BDM METHOD.  IT MAY DEADLOCK.
+      """
+
+
+      if self.calledFromBDM:
+         LOGERROR('Called createNewWallet() from BDM method!')
+         LOGERROR('Don\'t do this!')
+         return None
+
+      # Normalize raw-string inputs into SecureBinaryData containers
+      if securePassphrase:
+         securePassphrase = SecureBinaryData(securePassphrase)
+      if plainRootKey:
+         plainRootKey = SecureBinaryData(plainRootKey)
+      if chaincode:
+         chaincode = SecureBinaryData(chaincode)
+
+      if withEncrypt and not securePassphrase:
+         raise EncryptionError('Cannot create encrypted wallet without passphrase')
+
+      LOGINFO('***Creating new deterministic wallet')
+
+      # Set up the KDF
+      if not withEncrypt:
+         self.kdfKey = None
+      else:
+         LOGINFO('(with encryption)')
+         self.kdf = KdfRomix()
+         LOGINFO('Target (time,RAM)=(%0.3f,%d)', kdfTargSec, kdfMaxMem)
+         (mem,niter,salt) = self.computeSystemSpecificKdfParams( \
+                                                kdfTargSec, kdfMaxMem)
+         self.kdf.usePrecomputedKdfParams(mem, niter, salt)
+         self.kdfKey = self.kdf.DeriveKey(securePassphrase)
+
+      if not plainRootKey:
+         # TODO: We should find a source for injecting extra entropy
+         #       At least, Crypto++ grabs from a few different sources, itself
+         if not extraEntropy:
+            extraEntropy = SecureBinaryData(0)
+         plainRootKey = SecureBinaryData().GenerateRandom(32, extraEntropy)
+
+      if not chaincode:
+         #chaincode = SecureBinaryData().GenerateRandom(32)
+         # For wallet 1.35a, derive chaincode deterministically from root key
+         # The root key already has 256 bits of entropy which is excessive,
+         # anyway.  And my original reason for having the chaincode random is
+         # no longer valid.
+         chaincode = DeriveChaincodeFromRootKey(plainRootKey)
+
+
+
+      # Create the root address object
+      rootAddr = PyBtcAddress().createFromPlainKeyData( \
+                                             plainRootKey, \
+                                             IV16=IV, \
+                                             willBeEncr=withEncrypt, \
+                                             generateIVIfNecessary=True)
+      rootAddr.markAsRootAddr(chaincode)
+
+      # This does nothing if no encryption
+      rootAddr.lock(self.kdfKey)
+      rootAddr.unlock(self.kdfKey)
+
+      # First chained address; its hash160 seeds the wallet's unique ID
+      firstAddr = rootAddr.extendAddressChain(self.kdfKey)
+      first160  = firstAddr.getAddr160()
+
+      # Update wallet object with the new data
+      # NEW IN WALLET VERSION 1.35:  unique ID is now based on
+      # the first chained address: this guarantees that the unique ID
+      # is based not only on the private key, BUT ALSO THE CHAIN CODE
+      self.useEncryption = withEncrypt
+      self.addrMap['ROOT'] = rootAddr
+      self.addrMap[firstAddr.getAddr160()] = firstAddr
+      self.uniqueIDBin = (ADDRBYTE + firstAddr.getAddr160()[:5])[::-1]
+      self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
+      self.labelName  = shortLabel[:32]
+      self.labelDescr  = longLabel[:256]
+      self.lastComputedChainAddr160 = first160
+      self.lastComputedChainIndex  = firstAddr.chainIndex
+      self.highestUsedChainIndex   = firstAddr.chainIndex-1
+      self.wltCreateDate = long(RightNow())
+      self.linearAddr160List = [first160]
+      self.chainIndexMap[firstAddr.chainIndex] = first160
+
+      # We don't have to worry about atomic file operations when
+      # creating the wallet: so we just do it naively here.
+      self.walletPath = newWalletFilePath
+      if not newWalletFilePath:
+         shortName = self.labelName .replace(' ','_')
+         # This was really only needed when we were putting name in filename
+         #for c in ',?;:\'"?/\\=+-|[]{}<>':
+            #shortName = shortName.replace(c,'_')
+         newName = 'armory_%s_.wallet' % self.uniqueIDB58
+         self.walletPath = os.path.join(ARMORY_HOME_DIR, newName)
+
+      LOGINFO('   New wallet will be written to: %s', self.walletPath)
+      newfile = open(self.walletPath, 'wb')
+      fileData = BinaryPacker()
+
+      # packHeader method writes KDF params and root address
+      headerBytes = self.packHeader(fileData)
+
+      # We make sure we have byte locations of the two addresses, to start
+      self.addrMap[first160].walletByteLoc = headerBytes + 21
+
+      fileData.put(BINARY_CHUNK, '\x00' + first160 + firstAddr.serialize())
+
+
+      # Store the current localtime and blocknumber.  Block number is always
+      # accurate if available, but time may not be exactly right.  Whenever
+      # basing anything on time, please assume that it is up to one day off!
+      time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
+
+      # Don't forget to sync the C++ wallet object
+      self.cppWallet = Cpp.BtcWallet()
+      self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(rootAddr.getAddr160()), \
+                                                   time0,blk0,time0,blk0)
+      self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(first160), \
+                                                   time0,blk0,time0,blk0)
+
+      # We might be holding the wallet temporarily and not ready to register it
+      if doRegisterWithBDM:
+         TheBDM.registerWallet(self.cppWallet, isFresh=isActuallyNew) # new wallet
+
+
+      newfile.write(fileData.getBinaryString())
+      newfile.close()
+
+      if not skipBackupFile:
+         walletFileBackup = self.getWalletPath('backup')
+         shutil.copy(self.walletPath, walletFileBackup)
+
+      # Lock/unlock to make sure encrypted keys are computed and written to file
+      if self.useEncryption:
+         self.unlock(secureKdfOutput=self.kdfKey, Progress=Progress)
+
+      # Let's fill the address pool while we are unlocked
+      # It will get a lot more expensive if we do it on the next unlock
+      if doRegisterWithBDM:
+         self.fillAddressPool(self.addrPoolSize, isActuallyNew=isActuallyNew,
+                              Progress=Progress)
+
+      if self.useEncryption:
+         self.lock()
+
+      return self
+
+ #############################################################################
+   def advanceHighestIndex(self, ct=1):
+      # Move the highest-used-address marker forward by ct (clamped to
+      # [0, lastComputedChainIndex]), persist it to the wallet file, and
+      # top up the address pool.
+      topIndex = self.highestUsedChainIndex + ct
+      topIndex = min(topIndex, self.lastComputedChainIndex)
+      topIndex = max(topIndex, 0)
+
+      self.highestUsedChainIndex = topIndex
+      self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, self.offsetTopUsed, \
+                    int_to_binary(self.highestUsedChainIndex, widthBytes=8)]])
+      self.fillAddressPool()
+
+ #############################################################################
+   def rewindHighestIndex(self, ct=1):
+      # Convenience inverse of advanceHighestIndex (same clamping applies)
+      self.advanceHighestIndex(-ct)
+
+
+ #############################################################################
+ def peekNextUnusedAddr160(self):
+ try:
+ return self.getAddress160ByChainIndex(self.highestUsedChainIndex+1)
+ except:
+ # Not sure why we'd fail, maybe addrPoolSize==0?
+ return ''
+
+ #############################################################################
+   def getNextUnusedAddress(self):
+      # Hand out the next chain address: refill the pool if it is running
+      # low, advance the used-index marker, touch the address (updates its
+      # timestamps), persist it, and return the PyBtcAddress object.
+      if self.lastComputedChainIndex - self.highestUsedChainIndex < \
+                                              max(self.addrPoolSize-1,1):
+         self.fillAddressPool(self.addrPoolSize)
+
+      self.advanceHighestIndex(1)
+      new160 = self.getAddress160ByChainIndex(self.highestUsedChainIndex)
+      self.addrMap[new160].touch()
+      self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, \
+                                  self.addrMap[new160].walletByteLoc, \
+                                  self.addrMap[new160].serialize()]]  )
+      return self.addrMap[new160]
+
+
+ #############################################################################
+   def computeNextAddress(self, addr160=None, isActuallyNew=True, doRegister=True):
+      """
+      Use this to extend the chain beyond the last-computed address.
+
+      We will usually be computing the next address from the tip of the
+      chain, but I suppose someone messing with the file format may
+      leave gaps in the chain requiring some to be generated in the middle
+      (then we can use the addr160 arg to specify which address to extend)
+
+      Returns the hash160 of the newly computed address.
+      """
+      if not addr160:
+         addr160 = self.lastComputedChainAddr160
+
+      # Derive the next key, append it to the wallet file, and record its
+      # byte offset (+21 skips the entry's type/key prefix)
+      newAddr = self.addrMap[addr160].extendAddressChain(self.kdfKey)
+      new160 = newAddr.getAddr160()
+      newDataLoc = self.walletFileSafeUpdate( \
+         [[WLT_UPDATE_ADD, WLT_DATATYPE_KEYDATA, new160, newAddr]])
+      self.addrMap[new160] = newAddr
+      self.addrMap[new160].walletByteLoc = newDataLoc[0] + 21
+
+      if newAddr.chainIndex > self.lastComputedChainIndex:
+         self.lastComputedChainAddr160  = new160
+         self.lastComputedChainIndex = newAddr.chainIndex
+
+      self.linearAddr160List.append(new160)
+      self.chainIndexMap[newAddr.chainIndex] = new160
+
+      # In the future we will enable first/last seen, but not yet
+      time0,blk0 = getCurrTimeAndBlock() if isActuallyNew else (0,0)
+      self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(new160), \
+                                       time0,blk0,time0,blk0)
+
+      # For recovery rescans, this method will be called directly by
+      # the BDM, which may cause a deadlock if we go through the
+      # thread queue.  The calledFromBDM is "permission" to access the
+      # BDM private methods directly
+      if doRegister:
+         if self.calledFromBDM:
+            TheBDM.registerScrAddr_bdm_direct(Hash160ToScrAddr(new160), timeInfo=isActuallyNew)
+         else:
+            # This uses the thread queue, which means the address will be
+            # registered next time the BDM is not busy
+            TheBDM.registerScrAddr(Hash160ToScrAddr(new160), isFresh=isActuallyNew)
+
+      return new160
+
+ #############################################################################
+   def fillAddressPool(self, numPool=None, isActuallyNew=True,
+                       doRegister=True, Progress=emptyFunc):
+      """
+      Usually, when we fill the address pool, we are generating addresses
+      for the first time, and thus there is no chance it's ever seen the
+      blockchain.  However, this method is also used for recovery/import
+      of wallets, where the address pool has addresses that probably have
+      transactions already in the blockchain.
+
+      Returns the new lastComputedChainIndex.
+      """
+      if not numPool:
+         numPool = self.addrPoolSize
+
+      # Only compute enough to restore the gap between used and computed
+      gap = self.lastComputedChainIndex - self.highestUsedChainIndex
+      numToCreate = max(numPool - gap, 0)
+      for i in range(numToCreate):
+         Progress(i+1, numToCreate)
+         self.computeNextAddress(isActuallyNew=isActuallyNew,
+                                 doRegister=doRegister)
+         #dlgPrg.UpdateHBar(i+1)
+
+      return self.lastComputedChainIndex
+
+ #############################################################################
+   def setAddrPoolSize(self, newSize):
+      # Change the lookahead pool size and immediately fill to the new size.
+      # Sizes below 5 are rejected (too little lookahead to be safe).
+      if newSize<5:
+         LOGERROR('Will not allow address pool sizes smaller than 5...')
+         return
+
+      self.addrPoolSize = newSize
+      self.fillAddressPool(newSize)
+
+
+ #############################################################################
+   def getHighestUsedIndex(self):
+      """
+      This only retrieves the stored value, but it may not be correct if,
+      for instance, the wallet was just imported but has been used before.
+      (Use detectHighestUsedIndex for a blockchain-backed answer.)
+      """
+      return self.highestUsedChainIndex
+
+
+ #############################################################################
+   def getHighestComputedIndex(self):
+      """
+      This only retrieves the stored value, but it may not be correct if,
+      for instance, the wallet was just imported but has been used before.
+      (This is the pool tip, not the highest USED index.)
+      """
+      return self.lastComputedChainIndex
+
+
+
+ #############################################################################
+   def detectHighestUsedIndex(self, writeResultToWallet=False, fullscan=False):
+      """
+      This method is used to find the highestUsedChainIndex value of the
+      wallet WITHIN its address pool.  It will NOT extend its address pool
+      in this search, because it is assumed that the wallet couldn't have
+      used any addresses it had not calculated yet.
+
+      If you have a wallet IMPORT, though, or a wallet that has been used
+      before but does not have this information stored with it, then you
+      should be using the next method:
+
+            self.freshImportFindHighestIndex()
+
+      which will actually extend the address pool as necessary to find the
+      highest address used.
+
+      Returns the detected highest index, or -1 if the blockchain is
+      unavailable.
+      """
+      if not TheBDM.getBDMState()=='BlockchainReady' and not self.calledFromBDM:
+         LOGERROR('Cannot detect any usage information without the blockchain')
+         return -1
+
+      # Temporarily force read-only sync so this probe can't trigger writes
+      oldSync = self.doBlockchainSync
+      self.doBlockchainSync = BLOCKCHAIN_READONLY
+      if fullscan:
+         # Will initiate rescan if wallet is dirty
+         self.syncWithBlockchain(self.lastSyncBlockNum)
+      else:
+         # Will only use data already scanned, even if wallet is dirty
+         self.syncWithBlockchainLite(self.lastSyncBlockNum)
+      self.doBlockchainSync = oldSync
+
+      # An address is "used" if it has any ledger entries
+      highestIndex = max(self.highestUsedChainIndex, 0)
+      for addr in self.getLinearAddrList(withAddrPool=True):
+         a160 = addr.getAddr160()
+         if len(self.getAddrTxLedger(a160)) > 0:
+            highestIndex = max(highestIndex, addr.chainIndex)
+
+      if writeResultToWallet:
+         self.highestUsedChainIndex = highestIndex
+         self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY, self.offsetTopUsed, \
+                                      int_to_binary(highestIndex, widthBytes=8)]])
+
+
+      return highestIndex
+
+
+
+
+ #############################################################################
+   @TimeThisFunction
+   def freshImportFindHighestIndex(self, stepSize=None):
+      """
+      This is much like detectHighestUsedIndex, except this will extend the
+      address pool as necessary.  It assumes that you have a fresh wallet
+      that has been used before, but was deleted and restored from its root
+      key and chaincode, and thus we don't know if only 10 or 10,000 addresses
+      were used.
+
+      If this was an exceptionally active wallet, it's possible that we
+      may need to manually increase the step size to be sure we find
+      everything.  In fact, there is no way to tell  FOR SURE what is the
+      last addressed used: one must make an assumption that the wallet
+      never calculated more than X addresses without receiving a payment...
+      """
+      if not stepSize:
+         stepSize = self.addrPoolSize
+
+      topCompute = 0
+      topUsed    = 0
+      oldPoolSize = self.addrPoolSize
+      self.addrPoolSize = stepSize
+      # When we hit the highest address, the topCompute value will extend
+      # out [stepsize] addresses beyond topUsed, and the topUsed will not
+      # change, thus escaping the while loop
+      nWhile = 0
+      while topCompute - topUsed < 0.9*stepSize:
+         topCompute = self.fillAddressPool(stepSize, isActuallyNew=False)
+         topUsed = self.detectHighestUsedIndex(True)
+         nWhile += 1
+         # Hard cap on iterations as a guard against a broken exit condition
+         if nWhile>10000:
+            raise WalletAddressError('Escaping inf loop in freshImport...')
+
+
+      # Restore the caller's pool size before returning
+      self.addrPoolSize = oldPoolSize
+      return topUsed
+
+
+ #############################################################################
+   def writeFreshWalletFile(self, path, newName='', newDescr=''):
+      # Serialize this wallet (header, all non-root keys, and all address/tx
+      # comments) into a brand-new file at 'path'.
+      # NOTE(review): newName/newDescr are currently unused by this body --
+      # the copy keeps the original label/description; confirm intent.
+      newFile = open(path, 'wb')
+      bp = BinaryPacker()
+      self.packHeader(bp)
+      newFile.write(bp.getBinaryString())
+
+      for addr160,addrObj in self.addrMap.iteritems():
+         if not addr160=='ROOT':
+            newFile.write('\x00' + addr160 + addrObj.serialize())
+
+      for hashVal,comment in self.commentsMap.iteritems():
+         twoByteLength = int_to_binary(len(comment), widthBytes=2)
+         if len(hashVal)==20:
+            # 20-byte key: comment attached to an address
+            typestr = int_to_binary(WLT_DATATYPE_ADDRCOMMENT)
+            newFile.write(typestr + hashVal + twoByteLength + comment)
+         elif len(hashVal)==32:
+            # 32-byte key: comment attached to a transaction
+            typestr = int_to_binary(WLT_DATATYPE_TXCOMMENT)
+            newFile.write(typestr + hashVal + twoByteLength + comment)
+
+      newFile.close()
+
+
+ #############################################################################
+   def makeUnencryptedWalletCopy(self, newPath, securePassphrase=None):
+      # Write a copy of this wallet to newPath and, if this wallet is
+      # encrypted, strip the encryption from the copy (requires the wallet
+      # to be unlocked, or a passphrase to unlock it).  Returns True on
+      # success, False if locked and no passphrase was supplied.
+
+      self.writeFreshWalletFile(newPath)
+      if not self.useEncryption:
+         return True
+
+      if self.isLocked:
+         if not securePassphrase:
+            LOGERROR('Attempted to make unencrypted copy without unlocking')
+            return False
+         else:
+            self.unlock(securePassphrase=SecureBinaryData(securePassphrase))
+
+      # Re-open the copy, decrypt it in place, and drop its stray backup file
+      newWlt = PyBtcWallet().readWalletFile(newPath)
+      newWlt.unlock(self.kdfKey)
+      newWlt.changeWalletEncryption(None)
+
+
+      walletFileBackup = newWlt.getWalletPath('backup')
+      if os.path.exists(walletFileBackup):
+         LOGINFO('New wallet created, deleting backup file')
+         os.remove(walletFileBackup)
+      return True
+
+
+ #############################################################################
+   def makeEncryptedWalletCopy(self, newPath, securePassphrase=None):
+      """
+      Unlike the previous method, I can't just copy it if it's unencrypted,
+      because the target device probably shouldn't be exposed to the
+      unencrypted wallet.  So for that case, we will encrypt the wallet
+      in place, copy, then remove the encryption.
+
+      Returns True on success, False if the source is unencrypted and no
+      passphrase was supplied.
+      """
+
+      if self.useEncryption:
+         # Encrypted->Encrypted:  Easy!
+         self.writeFreshWalletFile(newPath)
+         return True
+
+      if not securePassphrase:
+         LOGERROR("Tried to make encrypted copy, but no passphrase supplied")
+         return False
+
+      # If we're starting unencrypted...encrypt it in place
+      # (0.25s KDF target; params derived for this machine)
+      (mem,nIter,salt) = self.computeSystemSpecificKdfParams(0.25)
+      self.changeKdfParams(mem, nIter, salt)
+      self.changeWalletEncryption(securePassphrase=securePassphrase)
+
+      # Write the encrypted wallet to the target directory
+      self.writeFreshWalletFile(newPath)
+
+      # Unencrypt the wallet now
+      self.unlock(securePassphrase=securePassphrase)
+      self.changeWalletEncryption(None)
+      return True
+
+
+
+
+
+   #############################################################################
+   def forkOnlineWallet(self, newWalletFile, shortLabel='', longLabel=''):
+      """
+      Make a copy of this wallet that contains no private key data
+      """
+      # NOTE(review): this only warns -- it does not return/abort, so the
+      # fork proceeds even for an already watching-only wallet
+      if not self.addrMap['ROOT'].hasPrivKey():
+         LOGWARN('This wallet is already void of any private key data!')
+         LOGWARN('Aborting wallet fork operation.')
+
+      onlineWallet = PyBtcWallet()
+      onlineWallet.fileTypeStr = self.fileTypeStr
+      onlineWallet.version = self.version
+      onlineWallet.magicBytes = self.magicBytes
+      onlineWallet.wltCreateDate = self.wltCreateDate
+      onlineWallet.useEncryption = False
+      onlineWallet.watchingOnly = True
+
+      if not shortLabel:
+         shortLabel = self.labelName
+      if not longLabel:
+         longLabel = self.labelDescr
+
+      onlineWallet.labelName = (shortLabel + ' (Watch)')[:32]
+      onlineWallet.labelDescr = (longLabel + ' (Watching-only copy)')[:256]
+
+      # NOTE(review): newAddrMap is never used below
+      newAddrMap = {}
+      # Copy each address but blank out all private-key material and IVs
+      for addr160,addrObj in self.addrMap.iteritems():
+         onlineWallet.addrMap[addr160] = addrObj.copy()
+         onlineWallet.addrMap[addr160].binPrivKey32_Encr = SecureBinaryData()
+         onlineWallet.addrMap[addr160].binPrivKey32_Plain = SecureBinaryData()
+         onlineWallet.addrMap[addr160].binInitVector16 = SecureBinaryData()
+         onlineWallet.addrMap[addr160].useEncryption = False
+         onlineWallet.addrMap[addr160].createPrivKeyNextUnlock = False
+
+      # NOTE(review): these maps are shared by reference, not copied
+      onlineWallet.commentsMap = self.commentsMap
+      onlineWallet.opevalMap = self.opevalMap
+
+      onlineWallet.uniqueIDBin = self.uniqueIDBin
+      onlineWallet.highestUsedChainIndex = self.highestUsedChainIndex
+      onlineWallet.lastComputedChainAddr160 = self.lastComputedChainAddr160
+      onlineWallet.lastComputedChainIndex = self.lastComputedChainIndex
+
+      onlineWallet.writeFreshWalletFile(newWalletFile, shortLabel, longLabel)
+      return onlineWallet
+
+
+
+   #############################################################################
+   def supplyRootKeyForWatchingOnlyWallet(self, securePlainRootKey32, \
+                                                permanent=False):
+      """
+      If you have a watching only wallet, you might want to upgrade it to a
+      full wallet by supplying the 32-byte root private key.  Generally, this
+      will be used to make a 'permanent' upgrade to your wallet, and the new
+      keys will be written to file ( NOTE:  you should setup encryption just
+      after doing this, to make sure that the plaintext keys get wiped from
+      your wallet file).
+
+      On the other hand, if you don't want this to be a permanent upgrade,
+      this could potentially be used to maintain a watching only wallet on your
+      harddrive, and actually plug in your plaintext root key instead of an
+      encryption password whenever you want sign transactions.
+      """
+      # TODO: not implemented yet -- intentionally a no-op
+      pass
+
+
+
+   #############################################################################
+   def touchAddress(self, addr20):
+      """
+      Use this to update your wallet file to recognize the first/last times
+      seen for the address.  This information will improve blockchain search
+      speed, if it knows not to search transactions that happened before they
+      were created.
+      """
+      # TODO: not implemented yet -- intentionally a no-op
+      pass
+
+
+   #############################################################################
+   def testKdfComputeTime(self):
+      """
+      Experimentally determines the compute time required by this computer
+      to execute with the current key-derivation parameters.  This may be
+      useful for when you transfer a wallet to a new computer that has
+      different speed/memory characteristic.
+      """
+      # Derive a key from a throwaway passphrase and time the operation;
+      # the result is cached on self.testedComputeTime and also returned
+      testPassphrase = SecureBinaryData('This is a simple passphrase')
+      start = RightNow()
+      self.kdf.DeriveKey(testPassphrase)
+      self.testedComputeTime = (RightNow()-start)
+      return self.testedComputeTime
+
+
+   #############################################################################
+   def serializeKdfParams(self, kdfObj=None, binWidth=256):
+      """
+      Pack key-derivation function parameters into a binary stream.
+      As of wallet version 1.0, there is only one KDF technique used
+      in these wallets, and thus we only need to store the parameters
+      of this KDF.  In the future, we may have multiple KDFs and have
+      to store the selection in this serialization.
+      """
+      if not kdfObj:
+         kdfObj = self.kdf
+
+      # No KDF at all serializes as all-zero bytes (see unserializeKdfParams)
+      if not kdfObj:
+         return '\x00'*binWidth
+
+      # Layout: 8-byte memory reqt + 4-byte iteration count + 32-byte salt,
+      # followed by a 4-byte checksum of those 44 bytes, zero-padded to binWidth
+      binPacker = BinaryPacker()
+      binPacker.put(UINT64, kdfObj.getMemoryReqtBytes())
+      binPacker.put(UINT32, kdfObj.getNumIterations())
+      binPacker.put(BINARY_CHUNK, kdfObj.getSalt().toBinStr(), width=32)
+
+      kdfStr = binPacker.getBinaryString()
+      binPacker.put(BINARY_CHUNK, computeChecksum(kdfStr,4), width=4)
+      padSize = binWidth - binPacker.getSize()
+      binPacker.put(BINARY_CHUNK, '\x00'*padSize)
+
+      return binPacker.getBinaryString()
+
+
+
+
+   #############################################################################
+   def unserializeKdfParams(self, toUnpack, binWidth=256):
+      """
+      Read the binWidth-byte KDF-parameter field written by
+      serializeKdfParams and return a KdfRomix object, or None if the
+      field is all zeroes (no KDF).  Raises UnserializeError when the
+      data is corrupted beyond what the checksum can repair; when the
+      checksum CAN repair it, the fix is written back to the wallet file.
+      """
+
+      if isinstance(toUnpack, BinaryUnpacker):
+         binUnpacker = toUnpack
+      else:
+         binUnpacker = BinaryUnpacker(toUnpack)
+
+
+
+      # 44 bytes of params (8+4+32) + 4-byte checksum + zero padding
+      allKdfData = binUnpacker.get(BINARY_CHUNK, 44)
+      kdfChksum  = binUnpacker.get(BINARY_CHUNK,  4)
+      kdfBytes   = len(allKdfData) + len(kdfChksum)
+      padding    = binUnpacker.get(BINARY_CHUNK, binWidth-kdfBytes)
+
+      if allKdfData=='\x00'*44:
+         return None
+
+      fixedKdfData = verifyChecksum(allKdfData, kdfChksum)
+      if len(fixedKdfData)==0:
+         raise UnserializeError('Corrupted KDF params, could not fix')
+      elif not fixedKdfData==allKdfData:
+         self.walletFileSafeUpdate( \
+               [[WLT_UPDATE_MODIFY, self.offsetKdfParams, fixedKdfData]])
+         allKdfData = fixedKdfData
+         LOGWARN('KDF params in wallet were corrupted, but fixed')
+
+      kdfUnpacker = BinaryUnpacker(allKdfData)
+      mem   = kdfUnpacker.get(UINT64)
+      nIter = kdfUnpacker.get(UINT32)
+      salt  = kdfUnpacker.get(BINARY_CHUNK, 32)
+
+      kdf = KdfRomix(mem, nIter, SecureBinaryData(salt))
+      return kdf
+
+
+
+   #############################################################################
+   def serializeCryptoParams(self, binWidth=256):
+      """
+      As of wallet version 1.0, all wallets use the exact same encryption types,
+      so there is nothing to serialize or unserialize.  The 256 bytes here may
+      be used in the future, though.
+      """
+      # Reserved field: always zero bytes for now
+      return '\x00'*binWidth
+
+
+   #############################################################################
+   def unserializeCryptoParams(self, toUnpack, binWidth=256):
+      """
+      As of wallet version 1.0, all wallets use the exact same encryption types,
+      so there is nothing to serialize or unserialize.  The 256 bytes here may
+      be used in the future, though.
+      """
+      if isinstance(toUnpack, BinaryUnpacker):
+         binUnpacker = toUnpack
+      else:
+         binUnpacker = BinaryUnpacker(toUnpack)
+
+      # Consume the reserved bytes; the only supported cipher is AES
+      binUnpacker.get(BINARY_CHUNK, binWidth)
+      return CryptoAES()
+
+
+   #############################################################################
+   def verifyPassphrase(self, securePassphrase):
+      """
+      Verify a user-submitted passphrase.  This passphrase goes into
+      the key-derivation function to get actual encryption key, which
+      is what actually needs to be verified
+
+      Since all addresses should have the same encryption, we only need
+      to verify correctness on the root key
+      """
+      kdfOutput = self.kdf.DeriveKey(securePassphrase)
+      # finally guarantees the derived key material is wiped either way
+      try:
+         isValid = self.addrMap['ROOT'].verifyEncryptionKey(kdfOutput)
+         return isValid
+      finally:
+         kdfOutput.destroy()
+
+
+
+   #############################################################################
+   def verifyEncryptionKey(self, secureKdfOutput):
+      """
+      Verify the underlying encryption key (from KDF).
+      Since all addresses should have the same encryption,
+      we only need to verify correctness on the root key.
+      """
+      # Delegate to the root address entry; returns a boolean
+      return self.addrMap['ROOT'].verifyEncryptionKey(secureKdfOutput)
+
+
+
+   #############################################################################
+   def computeSystemSpecificKdfParams(self, targetSec=0.25, maxMem=32*1024*1024):
+      """
+      WARNING!!! DO NOT CHANGE KDF PARAMS AFTER ALREADY ENCRYPTED THE WALLET
+      By changing them on an already-encrypted wallet, we are going
+      to lose the original AES256-encryption keys -- which are
+      uniquely determined by (numIter, memReqt, salt, passphrase)
+
+      Only use this method before you have encrypted your wallet,
+      in order to determine good KDF parameters based on your
+      computer's specific speed/memory capabilities.
+      """
+      kdf = KdfRomix()
+      # long() for the C++ binding's expected integer type (Python 2)
+      kdf.computeKdfParams(targetSec, long(maxMem))
+
+      mem   = kdf.getMemoryReqtBytes()
+      nIter = kdf.getNumIterations()
+      salt  = SecureBinaryData(kdf.getSalt().toBinStr())
+      return (mem, nIter, salt)
+
+
+   #############################################################################
+   def restoreKdfParams(self, mem, numIter, secureSalt):
+      """
+      This method should only be used when we are loading an encrypted wallet
+      from file.  DO NOT USE THIS TO CHANGE KDF PARAMETERS.  Doing so may
+      result in data loss!
+      """
+      # Rebuild the KDF object directly from the stored parameters
+      self.kdf = KdfRomix(mem, numIter, secureSalt)
+
+
+
+   #############################################################################
+   def changeKdfParams(self, mem, numIter, salt, securePassphrase=None):
+      """
+      Changing KDF changes the wallet encryption key which means that a KDF
+      change is essentially the same as an encryption key change.  As such,
+      the wallet must be unlocked if you intend to change an already-
+      encrypted wallet with KDF.
+
+      TODO: this comment doesn't belong here...where does it go? :
+      If the KDF is NOT yet setup, this method will do it.  Supply the target
+      compute time, and maximum memory requirements, and the underlying C++
+      code will experimentally determine the "hardest" key-derivation params
+      that will run within the specified time and memory usage on the system
+      executing this method.  You should set the max memory usage very low
+      (a few kB) for devices like smartphones, which have limited memory
+      availability.  The KDF will then use less memory but more iterations
+      to achieve the same compute time.
+      """
+      if self.useEncryption:
+         if not securePassphrase:
+            LOGERROR('')
+            LOGERROR('You have requested changing the key-derivation')
+            LOGERROR('parameters on an already-encrypted wallet, which')
+            LOGERROR('requires modifying the encryption on this wallet.')
+            LOGERROR('Please unlock your wallet before attempting to')
+            LOGERROR('change the KDF parameters.')
+            raise WalletLockError('Cannot change KDF without unlocking wallet')
+         elif not self.verifyPassphrase(securePassphrase):
+            LOGERROR('Incorrect passphrase to unlock wallet')
+            raise PassphraseError('Incorrect passphrase to unlock wallet')
+
+      # salt may arrive as a raw binary string; wrap it for the KDF object
+      secureSalt = SecureBinaryData(salt)
+      newkdf = KdfRomix(mem, numIter, secureSalt)
+      bp = BinaryPacker()
+      bp.put(BINARY_CHUNK, self.serializeKdfParams(newkdf), width=256)
+      updList = [[WLT_UPDATE_MODIFY, self.offsetKdfParams, bp.getBinaryString()]]
+
+      if not self.useEncryption:
+         # We may be setting the kdf params before enabling encryption
+         self.walletFileSafeUpdate(updList)
+      else:
+         # Must change the encryption key, and we won't get here unless
+         # we have a passphrase to use.  changeWalletEncryption re-encrypts
+         # the keys and writes updList in the same atomic file update.
+         self.changeWalletEncryption(securePassphrase=securePassphrase, \
+                                     extraFileUpdates=updList, kdfObj=newkdf)
+
+      self.kdf = newkdf
+
+
+   #############################################################################
+   def changeWalletEncryption(self, secureKdfOutput=None, \
+                                    securePassphrase=None, \
+                                    extraFileUpdates=[],
+                                    kdfObj=None, Progress=emptyFunc):
+      """
+      Supply the passphrase you would like to use to encrypt this wallet
+      (or supply the KDF output directly, to skip the passphrase part).
+      This method will attempt to re-encrypt with the new passphrase.
+      This fails if the wallet is already locked with a different passphrase.
+      If encryption is already enabled, please unlock the wallet before
+      calling this method.
+
+      Make sure you set up the key-derivation function (KDF) before changing
+      from an unencrypted to an encrypted wallet.  An error will be thrown
+      if you don't.  You can use something like the following
+
+         # For a target of 0.05-0.1s compute time:
+         (mem,nIter,salt) = wlt.computeSystemSpecificKdfParams(0.1)
+         wlt.changeKdfParams(mem, nIter, salt)
+
+      Use the extraFileUpdates to pass in other changes that need to be
+      written to the wallet file in the same atomic operation as the
+      encryption key modifications.
+      """
+      # NOTE(review): extraFileUpdates=[] is a mutable default, but it is
+      # only read (copied via list() below), never mutated -- safe as-is
+
+      if not kdfObj:
+         kdfObj = self.kdf
+
+      oldUsedEncryption = self.useEncryption
+      if securePassphrase or secureKdfOutput:
+         newUsesEncryption = True
+      else:
+         newUsesEncryption = False
+
+      oldKdfKey = None
+      if oldUsedEncryption:
+         if self.isLocked:
+            raise WalletLockError('Must unlock wallet to change passphrase')
+         else:
+            oldKdfKey = self.kdfKey.copy()
+
+
+      if newUsesEncryption and not self.kdf:
+         raise EncryptionError('KDF must be setup before encrypting wallet')
+
+      # Prep the file-update list with extras passed in as argument
+      walletUpdateInfo = list(extraFileUpdates)
+
+      # Derive the new KDF key if a passphrase was supplied
+      newKdfKey = secureKdfOutput
+      if securePassphrase:
+         newKdfKey = self.kdf.DeriveKey(securePassphrase)
+
+      if oldUsedEncryption and newUsesEncryption and self.verifyEncryptionKey(newKdfKey):
+         LOGWARN('Attempting to change encryption to same passphrase!')
+         return # Wallet is encrypted with the new passphrase already
+
+
+      # With unlocked key data, put the rest in a try/except/finally block
+      # To make sure we destroy the temporary kdf outputs
+      try:
+         # If keys were previously unencrypted, they will be not have
+         # initialization vectors and need to be generated before encrypting.
+         # This is why we have the enableKeyEncryption() call
+
+         if not oldUsedEncryption==newUsesEncryption:
+            # If there was an encryption change, we must change the flags
+            # in the wallet file in the same atomic operation as changing
+            # the stored keys.  We can't let them get out of sync.
+            self.useEncryption = newUsesEncryption
+            walletUpdateInfo.append(self.createChangeFlagsEntry())
+            self.useEncryption = oldUsedEncryption
+            # Restore the old flag just in case the file write fails
+
+         newAddrMap = {}
+         i=1
+         nAddr = len(self.addrMap)
+
+         # Re-encrypt every address into a scratch map first; the live
+         # addrMap is only replaced after the file update succeeds
+         for addr160,addr in self.addrMap.iteritems():
+            Progress(i, nAddr)
+            i = i +1
+
+            newAddrMap[addr160] = addr.copy()
+            newAddrMap[addr160].enableKeyEncryption(generateIVIfNecessary=True)
+            newAddrMap[addr160].changeEncryptionKey(oldKdfKey, newKdfKey)
+            newAddrMap[addr160].walletByteLoc = addr.walletByteLoc
+            walletUpdateInfo.append( \
+               [WLT_UPDATE_MODIFY, addr.walletByteLoc, newAddrMap[addr160].serialize()])
+
+
+         # Try to update the wallet file with the new encrypted key data
+         updateSuccess = self.walletFileSafeUpdate( walletUpdateInfo )
+
+         if updateSuccess:
+            # Finally give the new data to the user
+            for addr160,addr in newAddrMap.iteritems():
+               self.addrMap[addr160] = addr.copy()
+
+         self.useEncryption = newUsesEncryption
+         if newKdfKey:
+            self.lock()
+            self.unlock(newKdfKey, Progress=Progress)
+
+      finally:
+         # Make sure we always destroy the temporary passphrase results
+         if newKdfKey: newKdfKey.destroy()
+         if oldKdfKey: oldKdfKey.destroy()
+
+
+   #############################################################################
+   def getWalletPath(self, nameSuffix=None):
+      """
+      Return the wallet's file path.  With nameSuffix, derive a sibling
+      path (e.g. 'backup', 'update_unsuccessful') by inserting
+      '_<suffix>' before the file extension.
+      """
+      fpath = self.walletPath
+
+      # A wallet with no path yet gets a default name under ARMORY_HOME_DIR
+      if self.walletPath=='':
+         fpath = os.path.join(ARMORY_HOME_DIR, 'armory_%s_.wallet' % self.uniqueIDB58)
+
+      if not nameSuffix==None:
+         pieces = os.path.splitext(fpath)
+         if not pieces[0].endswith('_'):
+            fpath = pieces[0] + '_' + nameSuffix + pieces[1]
+         else:
+            fpath = pieces[0] + nameSuffix + pieces[1]
+      return fpath
+
+
+
+
+   #############################################################################
+   def getCommentForAddress(self, addr160):
+      """Return the stored comment for a 20-byte address hash, or ''."""
+      if self.commentsMap.has_key(addr160):
+         return self.commentsMap[addr160]
+      else:
+         return ''
+
+
+   #############################################################################
+   def getComment(self, hashVal):
+      """
+      This method is used for both address comments, as well as tx comments
+      In the first case, use the 20-byte binary pubkeyhash.  Use 32-byte tx
+      hash for the tx-comment case.
+      """
+      # Returns '' when no comment is stored for this hash
+      if self.commentsMap.has_key(hashVal):
+         return self.commentsMap[hashVal]
+      else:
+         return ''
+
+
+   #############################################################################
+   def setComment(self, hashVal, newComment):
+      """
+      This method is used for both address comments, as well as tx comments
+      In the first case, use the 20-byte binary pubkeyhash.  Use 32-byte tx
+      hash for the tx-comment case.
+      """
+      updEntry = []
+      isNewComment = False
+      if self.commentsMap.has_key(hashVal):
+         # If there is already a comment for this address, overwrite it
+         oldCommentLen = len(self.commentsMap[hashVal])
+         oldCommentLoc = self.commentLocs[hashVal]
+         # The first 23 bytes are the datatype, hashVal, and 2-byte comment size
+         offset = 1 + len(hashVal) + 2
+         updEntry.append([WLT_UPDATE_MODIFY, oldCommentLoc+offset, '\x00'*oldCommentLen])
+      else:
+         isNewComment = True
+
+
+      # 20-byte hash => address comment; longer (32-byte) => tx comment
+      dtype = WLT_DATATYPE_ADDRCOMMENT
+      if len(hashVal)>20:
+         dtype = WLT_DATATYPE_TXCOMMENT
+
+      updEntry.append([WLT_UPDATE_ADD, dtype, hashVal, newComment])
+      newCommentLoc = self.walletFileSafeUpdate(updEntry)
+      self.commentsMap[hashVal] = newComment
+
+      # walletFileSafeUpdate returns one location per entry; the ADD for the
+      # new comment was appended last, so its location is the last element
+      self.commentLocs[hashVal] = newCommentLoc[-1]
+
+
+
+
+   #############################################################################
+   def getAddrCommentIfAvail(self, txHash):
+      """
+      Return a '; '-joined string of the comments of addresses involved in
+      the given tx (skipping '[[...]]' marker comments).  Falls back to the
+      plain tx comment when the blockchain isn't ready.  Caches the
+      txHash -> [addr160] mapping in self.txAddrMap.
+      """
+      if not TheBDM.getBDMState()=='BlockchainReady':
+         return self.getComment(txHash)
+
+      # If we haven't extracted relevant addresses for this tx, yet -- do it
+      if not self.txAddrMap.has_key(txHash):
+         self.txAddrMap[txHash] = []
+         tx = TheBDM.getTxByHash(txHash)
+         if tx.isInitialized():
+            for i in range(tx.getNumTxOut()):
+               txout = tx.getTxOutCopy(i)
+               stype = getTxOutScriptType(txout.getScript())
+               scrAddr = tx.getScrAddrForTxOut(i)
+
+               if stype in CPP_TXOUT_HAS_ADDRSTR:
+                  addrStr = scrAddr_to_addrStr(scrAddr)
+                  addr160 = addrStr_to_hash160(addrStr)[1]
+                  if self.hasAddr(addr160):
+                     self.txAddrMap[txHash].append(addr160)
+               else:
+                  LOGERROR("Unrecognized scraddr: " + binary_to_hex(scrAddr))
+
+
+
+      # '[[' comments are internal markers, not meant for display
+      addrComments = []
+      for a160 in self.txAddrMap[txHash]:
+         if self.commentsMap.has_key(a160) and '[[' not in self.commentsMap[a160]:
+            addrComments.append(self.commentsMap[a160])
+
+      return '; '.join(addrComments)
+
+
+
+   #############################################################################
+   def getCommentForLE(self, le):
+      """
+      Smart comment for a LedgerEntry: the direct tx comment if one exists,
+      otherwise the aggregated address comments ([[...]] markers suppressed).
+      """
+      # Smart comments for LedgerEntry objects:  get any direct comments ...
+      # if none, then grab the one for any associated addresses.
+      txHash = le.getTxHash()
+      if self.commentsMap.has_key(txHash):
+         comment = self.commentsMap[txHash]
+      else:
+         # [[ COMMENTS ]] are not meant to be displayed on main ledger
+         comment = self.getAddrCommentIfAvail(txHash)
+         if comment.startswith('[[') and comment.endswith(']]'):
+            comment = ''
+
+      return comment
+
+
+
+
+
+
+   #############################################################################
+   def setWalletLabels(self, lshort, llong=''):
+      """
+      Set and persist the wallet's short name (padded to 32 bytes) and long
+      description (padded to 256 bytes), overwriting them in-place in the
+      wallet file at the offsets recorded by packHeader/unpackHeader.
+      """
+      self.labelName  = lshort
+      self.labelDescr = llong
+      toWriteS = lshort.ljust( 32, '\x00')
+      toWriteL =  llong.ljust(256, '\x00')
+
+      updList = []
+      updList.append([WLT_UPDATE_MODIFY, self.offsetLabelName,  toWriteS])
+      updList.append([WLT_UPDATE_MODIFY, self.offsetLabelDescr, toWriteL])
+      self.walletFileSafeUpdate(updList)
+
+
+
+   #############################################################################
+   def packWalletFlags(self, binPacker):
+      """
+      Pack the 64 wallet flag bits as a UINT64 into binPacker.  Only bits
+      0 (useEncryption) and 1 (watchingOnly) are currently used.
+      """
+      nFlagBytes = 8
+      flags = [False]*nFlagBytes*8
+      flags[0] = self.useEncryption
+      flags[1] = self.watchingOnly
+      flagsBitset = ''.join([('1' if f else '0') for f in flags])
+      binPacker.put(UINT64, bitset_to_int(flagsBitset))
+
+
+   #############################################################################
+   def createChangeFlagsEntry(self):
+      """
+      Packs up the wallet flags and returns a update-entry that can be included
+      in a walletFileSafeUpdate call.
+      """
+      # Overwrites the flags field in-place at its recorded header offset
+      bp = BinaryPacker()
+      self.packWalletFlags(bp)
+      toWrite = bp.getBinaryString()
+      return [WLT_UPDATE_MODIFY, self.offsetWltFlags, toWrite]
+
+
+   #############################################################################
+   def unpackWalletFlags(self, toUnpack):
+      """
+      Read the 8-byte flags field (from a BinaryUnpacker or raw string) and
+      set self.useEncryption (bit 0) and self.watchingOnly (bit 1).
+      """
+      if isinstance(toUnpack, BinaryUnpacker):
+         flagData = toUnpack
+      else:
+         flagData = BinaryUnpacker( toUnpack )
+
+      wltflags = flagData.get(UINT64, 8)
+      wltflags = int_to_bitset(wltflags, widthBytes=8)
+      self.useEncryption = (wltflags[0]=='1')
+      self.watchingOnly  = (wltflags[1]=='1')
+
+
+
+   #############################################################################
+   def packHeader(self, binPacker):
+      """
+      Serialize the wallet header into binPacker, recording on self the
+      byte offset of every mutable field (flags, labels, topUsed, KDF,
+      crypto, root address) for later in-place file updates.  Returns the
+      number of bytes written.  Raises WalletAddressError if the wallet
+      has no ROOT address yet.
+      """
+      if not self.addrMap['ROOT']:
+         raise WalletAddressError('Cannot serialize uninitialzed wallet!')
+
+      startByte = binPacker.getSize()
+
+      binPacker.put(BINARY_CHUNK, self.fileTypeStr, width=8)
+      binPacker.put(UINT32, getVersionInt(self.version))
+      binPacker.put(BINARY_CHUNK, self.magicBytes,  width=4)
+
+      # Wallet info flags
+      self.offsetWltFlags = binPacker.getSize() - startByte
+      self.packWalletFlags(binPacker)
+
+      # Binary Unique ID (firstAddr25bytes[:5][::-1])
+      binPacker.put(BINARY_CHUNK, self.uniqueIDBin, width=6)
+
+      # Unix time of wallet creations
+      binPacker.put(UINT64, self.wltCreateDate)
+
+      # User-supplied wallet label (short)
+      self.offsetLabelName = binPacker.getSize() - startByte
+      binPacker.put(BINARY_CHUNK, self.labelName , width=32)
+
+      # User-supplied wallet label (long)
+      self.offsetLabelDescr = binPacker.getSize() - startByte
+      binPacker.put(BINARY_CHUNK, self.labelDescr,  width=256)
+
+      # Highest used address:
+      self.offsetTopUsed = binPacker.getSize() - startByte
+      binPacker.put(INT64, self.highestUsedChainIndex)
+
+      # Key-derivation function parameters
+      self.offsetKdfParams = binPacker.getSize() - startByte
+      binPacker.put(BINARY_CHUNK, self.serializeKdfParams(), width=256)
+
+      # Wallet encryption parameters (currently nothing to put here)
+      self.offsetCrypto = binPacker.getSize() - startByte
+      binPacker.put(BINARY_CHUNK, self.serializeCryptoParams(), width=256)
+
+      # Address-chain root, (base-address for deterministic wallets)
+      self.offsetRootAddr = binPacker.getSize() - startByte
+      self.addrMap['ROOT'].walletByteLoc = self.offsetRootAddr
+      binPacker.put(BINARY_CHUNK, self.addrMap['ROOT'].serialize())
+
+      # In wallet version 1.0, this next kB is unused -- may be used in future
+      binPacker.put(BINARY_CHUNK, '\x00'*1024)
+      return binPacker.getSize() - startByte
+
+
+
+
+
+   #############################################################################
+   def unpackHeader(self, binUnpacker):
+      """
+      Unpacking the header information from a wallet file.  See the help text
+      on the base class, PyBtcWallet, for more information on the wallet
+      serialization.
+
+      Returns 0 on success, -1 on blockchain mismatch, -2 on network
+      mismatch.  Records each mutable field's byte offset on self.
+      """
+      self.fileTypeStr = binUnpacker.get(BINARY_CHUNK, 8)
+      self.version     = readVersionInt(binUnpacker.get(UINT32))
+      self.magicBytes  = binUnpacker.get(BINARY_CHUNK, 4)
+
+      # Decode the bits to get the flags
+      self.offsetWltFlags = binUnpacker.getPosition()
+      self.unpackWalletFlags(binUnpacker)
+
+      # 6-byte unique wallet ID derived from the address-chain-root address;
+      # its last byte is the network byte (main network, testnet, namecoin)
+      self.uniqueIDBin = binUnpacker.get(BINARY_CHUNK, 6)
+      self.uniqueIDB58 = binary_to_base58(self.uniqueIDBin)
+      self.wltCreateDate  = binUnpacker.get(UINT64)
+
+      # We now have both the magic bytes and network byte
+      if not self.magicBytes == MAGIC_BYTES:
+         LOGERROR('Requested wallet is for a different blockchain!')
+         LOGERROR('Wallet is for:   %s ', BLOCKCHAINS[self.magicBytes])
+         LOGERROR('ArmoryEngine:    %s ', BLOCKCHAINS[MAGIC_BYTES])
+         return -1
+      if not self.uniqueIDBin[-1] == ADDRBYTE:
+         LOGERROR('Requested wallet is for a different network!')
+         LOGERROR('ArmoryEngine:    %s ', NETWORKS[ADDRBYTE])
+         return -2
+
+      # User-supplied description/name for wallet
+      self.offsetLabelName = binUnpacker.getPosition()
+      self.labelName  = binUnpacker.get(BINARY_CHUNK, 32).strip('\x00')
+
+
+      # Longer user-supplied description/name for wallet
+      self.offsetLabelDescr  = binUnpacker.getPosition()
+      self.labelDescr  = binUnpacker.get(BINARY_CHUNK, 256).strip('\x00')
+
+
+      self.offsetTopUsed = binUnpacker.getPosition()
+      self.highestUsedChainIndex = binUnpacker.get(INT64)
+
+
+      # Read the key-derivation function parameters
+      self.offsetKdfParams = binUnpacker.getPosition()
+      self.kdf = self.unserializeKdfParams(binUnpacker)
+
+      # Read the crypto parameters
+      self.offsetCrypto    = binUnpacker.getPosition()
+      self.crypto = self.unserializeCryptoParams(binUnpacker)
+
+      # Read address-chain root address data
+      self.offsetRootAddr  = binUnpacker.getPosition()
+
+
+      rawAddrData = binUnpacker.get(BINARY_CHUNK, self.pybtcaddrSize)
+      self.addrMap['ROOT'] = PyBtcAddress().unserialize(rawAddrData)
+      fixedAddrData = self.addrMap['ROOT'].serialize()
+      # If the checksum-repair changed anything, write the fix back to file
+      if not rawAddrData==fixedAddrData:
+         self.walletFileSafeUpdate([ \
+            [WLT_UPDATE_MODIFY, self.offsetRootAddr, fixedAddrData]])
+
+      self.addrMap['ROOT'].walletByteLoc = self.offsetRootAddr
+      if self.useEncryption:
+         self.addrMap['ROOT'].isLocked = True
+         self.isLocked = True
+
+      # In wallet version 1.0, this next kB is unused -- may be used in future
+      binUnpacker.advance(1024)
+
+      # TODO: automatic conversion if the code uses a newer wallet
+      #       version than the wallet... got a manual script, but it
+      #       would be nice to autodetect and correct
+      #convertVersion
+
+      return 0 #success
+
+
+   #############################################################################
+   def unpackNextEntry(self, binUnpacker):
+      """
+      Read one wallet-file entry from binUnpacker and return a triplet
+      (dtype, hashVal, binData).  hashVal/binData stay '' for entry types
+      that don't carry them; deleted data is skipped over, not returned.
+      Raises NotImplementedError for OP_EVAL entries.
+      """
+      dtype   = binUnpacker.get(UINT8)
+      hashVal = ''
+      binData = ''
+      if dtype==WLT_DATATYPE_KEYDATA:
+         hashVal = binUnpacker.get(BINARY_CHUNK, 20)
+         binData = binUnpacker.get(BINARY_CHUNK, self.pybtcaddrSize)
+      elif dtype==WLT_DATATYPE_ADDRCOMMENT:
+         hashVal = binUnpacker.get(BINARY_CHUNK, 20)
+         commentLen = binUnpacker.get(UINT16)
+         binData = binUnpacker.get(BINARY_CHUNK, commentLen)
+      elif dtype==WLT_DATATYPE_TXCOMMENT:
+         hashVal = binUnpacker.get(BINARY_CHUNK, 32)
+         commentLen = binUnpacker.get(UINT16)
+         binData = binUnpacker.get(BINARY_CHUNK, commentLen)
+      elif dtype==WLT_DATATYPE_OPEVAL:
+         raise NotImplementedError('OP_EVAL not support in wallet yet')
+      elif dtype==WLT_DATATYPE_DELETED:
+         deletedLen = binUnpacker.get(UINT16)
+         binUnpacker.advance(deletedLen)
+
+
+      return (dtype, hashVal, binData)
+
+
+   #############################################################################
+   @TimeThisFunction
+   def readWalletFile(self, wltpath, verifyIntegrity=True, doScanNow=False):
+      """
+      Load a wallet file from disk into this (re-initialized) object:
+      consistency-check, parse the header, then walk every entry (keys,
+      comments, deleted data).  Returns self, so calls can be chained as
+      PyBtcWallet().readWalletFile(path).  Raises FileExistsError if the
+      path doesn't exist and KeyDataError on unfixable corruption.
+      """
+      if not os.path.exists(wltpath):
+         raise FileExistsError("No wallet file:"+wltpath)
+
+      # Reset all in-memory state before loading
+      self.__init__()
+      self.walletPath = wltpath
+
+      if verifyIntegrity:
+         try:
+            nError = self.doWalletFileConsistencyCheck()
+         except KeyDataError, errmsg:
+            LOGEXCEPT('***ERROR:  Wallet file had unfixable errors.')
+            raise KeyDataError(errmsg)
+
+
+      wltfile = open(wltpath, 'rb')
+      wltdata = BinaryUnpacker(wltfile.read())
+      wltfile.close()
+
+      self.cppWallet = Cpp.BtcWallet()
+      self.unpackHeader(wltdata)
+
+      self.lastComputedChainIndex = -UINT32_MAX
+      self.lastComputedChainAddr160  = None
+      while wltdata.getRemainingSize()>0:
+         byteLocation = wltdata.getPosition()
+         dtype, hashVal, rawData = self.unpackNextEntry(wltdata)
+         if dtype==WLT_DATATYPE_KEYDATA:
+            newAddr = PyBtcAddress()
+            newAddr.unserialize(rawData)
+            # +21 skips the 1-byte type and 20-byte hash preceding the data
+            newAddr.walletByteLoc = byteLocation + 21
+            # Fix byte errors in the address data
+            fixedAddrData = newAddr.serialize()
+
+            if not rawData==fixedAddrData:
+               self.walletFileSafeUpdate([ \
+                  [WLT_UPDATE_MODIFY, newAddr.walletByteLoc, fixedAddrData]])
+            if newAddr.useEncryption:
+               newAddr.isLocked = True
+            self.addrMap[hashVal] = newAddr
+            if newAddr.chainIndex > self.lastComputedChainIndex:
+               self.lastComputedChainIndex   = newAddr.chainIndex
+               self.lastComputedChainAddr160 = newAddr.getAddr160()
+
+            # Indices below -2 are clamped; remember that imports went negative
+            if newAddr.chainIndex < -2:
+               newAddr.chainIndex = -2
+               self.hasNegativeImports = True
+
+            self.linearAddr160List.append(newAddr.getAddr160())
+            self.chainIndexMap[newAddr.chainIndex] = newAddr.getAddr160()
+
+            # Update the parallel C++ object that scans the blockchain for us
+            timeRng = newAddr.getTimeRange()
+            blkRng  = newAddr.getBlockRange()
+            self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(hashVal), \
+                                         timeRng[0], blkRng[0], \
+                                         timeRng[1], blkRng[1])
+
+         if dtype in (WLT_DATATYPE_ADDRCOMMENT, WLT_DATATYPE_TXCOMMENT):
+            self.commentsMap[hashVal] = rawData # actually ASCII data, here
+            self.commentLocs[hashVal] = byteLocation
+         if dtype==WLT_DATATYPE_OPEVAL:
+            raise NotImplementedError('OP_EVAL not support in wallet yet')
+         if dtype==WLT_DATATYPE_DELETED:
+            pass
+
+
+      # Optionally sync with the blockchain if it's ready and allowed
+      if (not doScanNow or \
+          not TheBDM.getBDMState()=='BlockchainReady' or \
+          self.doBlockchainSync==BLOCKCHAIN_DONOTUSE):
+         pass
+      else:
+         self.syncWithBlockchain()
+
+
+      ### Update the wallet version if necessary ###
+      if getVersionInt(self.version) < getVersionInt(PYBTCWALLET_VERSION):
+         LOGERROR('Wallets older than version 1.35 no longer supported!')
+         return
+
+      return self
+
+
+
+
+ #############################################################################
+ def walletFileSafeUpdate(self, updateList):
+
+ """
+ The input "toAddDataList" should be a list of triplets, such as:
+ [
+ [WLT_DATA_ADD, WLT_DATATYPE_KEYDATA, addr160_1, PyBtcAddrObj1]
+ [WLT_DATA_ADD, WLT_DATATYPE_KEYDATA, addr160_2, PyBtcAddrObj2]
+ [WLT_DATA_MODIFY, modifyStartByte1, binDataForOverwrite1 ]
+ [WLT_DATA_ADD, WLT_DATATYPE_ADDRCOMMENT, addr160_3, 'Long-term savings']
+ [WLT_DATA_MODIFY, modifyStartByte2, binDataForOverwrite2 ]
+ ]
+
+ The return value is the list of new file byte offsets (from beginning of
+ the file), that specify the start of each modification made to the
+ wallet file. For MODIFY fields, this just returns the modifyStartByte
+ field that was provided as input. For adding data, it specifies the
+ starting byte of the new field (the DATATYPE byte). We keep this data
+ in PyBtcAddress objects so that we know where to apply modifications in
+ case we need to change something, like converting from unencrypted to
+ encrypted private keys.
+
+ If this method fails, we simply return an empty list. We can check for
+ an empty list to know if the file update succeeded.
+
+ WHY IS THIS SO COMPLICATED? -- Because it's atomic!
+
+ When we want to add data to the wallet file, we will do so in a completely
+ recoverable way. We define this method to make sure a backup exists when
+ we start modifying the file, and keep a flag to identify when the wallet
+ might be corrupt. If we ever try to load the wallet file and see another
+ file with the _update_unsuccessful suffix, we should instead just restore
+ from backup.
+
+ Similarly, we have to update the backup file after updating the main file
+ so we will use a similar technique with the backup_unsuccessful suffix.
+ We don't want to rely on a backup if somehow *the backup* got corrupted
+ and the original file is fine. THEREFORE -- this is implemented in such
+ a way that the user should know two things:
+
+ (1) No matter when the power goes out, we ALWAYS have a uncorrupted
+ wallet file, and know which one it is. Either the backup is safe,
+ or the original is safe. Based on the flag files, we know which
+ one is guaranteed to be not corrupted.
+ (2) ALWAYS DO YOUR FILE OPERATIONS BEFORE SETTING DATA IN MEMORY
+ You must write it to disk FIRST using this SafeUpdate method,
+ THEN give the new data to the user -- never give it to them
+ until you are sure that it was written safely to disk.
+
+ Number (2) is easy to screw up because you plan to write the file just
+ AFTER the data is created and stored in local memory. But an error
+ might be thrown halfway which is handled higher up, and instead the data
+ never made it to file. Then there is a risk that the user uses their
+ new address that never made it into the wallet file.
+ """
+
+ if not os.path.exists(self.walletPath):
+ raise FileExistsError('No wallet file exists to be updated!')
+
+ if len(updateList)==0:
+ return []
+
+ # Make sure that the primary and backup files are synced before update
+ self.doWalletFileConsistencyCheck()
+
+ walletFileBackup = self.getWalletPath('backup')
+ mainUpdateFlag = self.getWalletPath('update_unsuccessful')
+ backupUpdateFlag = self.getWalletPath('backup_unsuccessful')
+
+
+ # Will be passing back info about all data successfully added
+ oldWalletSize = os.path.getsize(self.walletPath)
+ updateLocations = []
+ dataToChange = []
+ toAppend = BinaryPacker()
+
+ try:
+ for entry in updateList:
+ modType = entry[0]
+ updateInfo = entry[1:]
+
+ if(modType==WLT_UPDATE_ADD):
+ dtype = updateInfo[0]
+ updateLocations.append(toAppend.getSize()+oldWalletSize)
+ if dtype==WLT_DATATYPE_KEYDATA:
+ if len(updateInfo[1])!=20 or not isinstance(updateInfo[2], PyBtcAddress):
+ raise Exception('Data type does not match update type')
+ toAppend.put(UINT8, WLT_DATATYPE_KEYDATA)
+ toAppend.put(BINARY_CHUNK, updateInfo[1])
+ toAppend.put(BINARY_CHUNK, updateInfo[2].serialize())
+
+ elif dtype in (WLT_DATATYPE_ADDRCOMMENT, WLT_DATATYPE_TXCOMMENT):
+ if not isinstance(updateInfo[2], str):
+ raise Exception('Data type does not match update type')
+ toAppend.put(UINT8, dtype)
+ toAppend.put(BINARY_CHUNK, updateInfo[1])
+ toAppend.put(UINT16, len(updateInfo[2]))
+ toAppend.put(BINARY_CHUNK, updateInfo[2])
+
+ elif dtype==WLT_DATATYPE_OPEVAL:
+ raise Exception('OP_EVAL not support in wallet yet')
+
+ elif(modType==WLT_UPDATE_MODIFY):
+ updateLocations.append(updateInfo[0])
+ dataToChange.append( updateInfo )
+ else:
+ LOGERROR('Unknown wallet-update type!')
+ raise Exception('Unknown wallet-update type!')
+ except Exception:
+ LOGEXCEPT('Bad input to walletFileSafeUpdate')
+ return []
+
+ binaryToAppend = toAppend.getBinaryString()
+
+ # We need to safely modify both the main wallet file and backup
+ # Start with main wallet
+ touchFile(mainUpdateFlag)
+
+ try:
+ wltfile = open(self.walletPath, 'ab')
+ wltfile.write(binaryToAppend)
+ wltfile.close()
+
+ # This is for unit-testing the atomic-wallet-file-update robustness
+ if self.interruptTest1: raise InterruptTestError
+
+ wltfile = open(self.walletPath, 'r+b')
+ for loc,replStr in dataToChange:
+ wltfile.seek(loc)
+ wltfile.write(replStr)
+ wltfile.close()
+
+ except IOError:
+ LOGEXCEPT('Could not write data to wallet. Permissions?')
+ shutil.copy(walletFileBackup, self.walletPath)
+ os.remove(mainUpdateFlag)
+ return []
+
+ # Write backup flag before removing main-update flag. If we see
+ # both flags, we know file IO was interrupted RIGHT HERE
+ touchFile(backupUpdateFlag)
+
+ # This is for unit-testing the atomic-wallet-file-update robustness
+ if self.interruptTest2: raise InterruptTestError
+
+ os.remove(mainUpdateFlag)
+
+ # Modify backup
+ try:
+ # This is for unit-testing the atomic-wallet-file-update robustness
+ if self.interruptTest3: raise InterruptTestError
+
+ backupfile = open(walletFileBackup, 'ab')
+ backupfile.write(binaryToAppend)
+ backupfile.close()
+
+ backupfile = open(walletFileBackup, 'r+b')
+ for loc,replStr in dataToChange:
+ backupfile.seek(loc)
+ backupfile.write(replStr)
+ backupfile.close()
+
+ except IOError:
+ LOGEXCEPT('Could not write backup wallet. Permissions?')
+ shutil.copy(self.walletPath, walletFileBackup)
+ os.remove(mainUpdateFlag)
+ return []
+
+ os.remove(backupUpdateFlag)
+
+ return updateLocations
+
+
+
+ #############################################################################
   def doWalletFileConsistencyCheck(self, onlySyncBackup=True):
      """
      First we check the file-update flags (files we touched/removed during
      file modification operations), and then restore the primary wallet file
      and backup file to the exact same state -- we know that at least one of
      them is guaranteed to not be corrupt, and we know based on the flags
      which one that is -- so we execute the appropriate copy operation.

      ***NOTE: For now, the remaining steps are untested and unused!

      After we have guaranteed that main wallet and backup wallet are the
      same, we want to do a check that the data is consistent. We do this
      by simply reading in the key-data from the wallet, unserializing it
      and reserializing it to see if it matches -- this works due to the
      way the PyBtcAddress::unserialize() method works: it verifies the
      checksums in the address data, and corrects errors automatically!
      And it's part of the unit-tests that serialize/unserialize round-trip
      is guaranteed to match for all address types if there's no byte errors.

      If an error is detected, we do a safe-file-modify operation to re-write
      the corrected information to the wallet file, in-place. We DO NOT
      check comment fields, since they do not have checksums, and are not
      critical to protect against byte errors.
      """

      # NOTE(review): under Python 2, 'FileExistsError' must be a project-
      # defined exception class (it only became a builtin in Python 3) --
      # confirm it is provided by the armoryengine imports.
      if not os.path.exists(self.walletPath):
         raise FileExistsError('No wallet file exists to be checked!')

      # Sibling files used by the atomic two-phase update in
      # walletFileSafeUpdate: the mirror copy of the wallet, and the two
      # "operation was interrupted here" flag files.
      walletFileBackup = self.getWalletPath('backup')
      mainUpdateFlag = self.getWalletPath('update_unsuccessful')
      backupUpdateFlag = self.getWalletPath('backup_unsuccessful')

      if not os.path.exists(walletFileBackup):
         # We haven't even created a backup file, yet
         LOGDEBUG('Creating backup file %s', walletFileBackup)
         touchFile(backupUpdateFlag)
         shutil.copy(self.walletPath, walletFileBackup)
         os.remove(backupUpdateFlag)

      # Flag semantics (mirrors walletFileSafeUpdate): the main flag is set
      # before the main file is touched, the backup flag before the backup
      # is touched, and each is removed once its file is known-good.
      if os.path.exists(backupUpdateFlag) and os.path.exists(mainUpdateFlag):
         # Here we actually have a good main file, but backup never succeeded
         LOGWARN('***WARNING: error in backup file... how did that happen?')
         shutil.copy(self.walletPath, walletFileBackup)
         os.remove(mainUpdateFlag)
         os.remove(backupUpdateFlag)
      elif os.path.exists(mainUpdateFlag):
         LOGWARN('***WARNING: last file operation failed! Restoring wallet from backup')
         # main wallet file might be corrupt, copy from backup
         shutil.copy(walletFileBackup, self.walletPath)
         os.remove(mainUpdateFlag)
      elif os.path.exists(backupUpdateFlag):
         LOGWARN('***WARNING: creation of backup was interrupted -- fixing')
         shutil.copy(self.walletPath, walletFileBackup)
         os.remove(backupUpdateFlag)

      if onlySyncBackup:
         # Caller only wanted the two files synced; the deeper key-data
         # verification described in the docstring is not implemented yet.
         return 0
+
+
+
+
+
+
+ #############################################################################
+ #def getAddrByIndex(self, i):
+ #return self.addrMap.values()[i]
+
+ #############################################################################
+ def deleteImportedAddress(self, addr160):
+ """
+ We want to overwrite a particular key in the wallet. Before overwriting
+ the data looks like this:
+ [ \x00 | <20-byte addr160> | <237-byte keydata> ]
+ And we want it to look like:
+ [ \x04 | <2-byte length> | \x00\x00\x00... ]
+ So we need to construct a wallet-update vector to modify the data
+ starting at the first byte, replace it with 0x04, specifies how many
+ bytes are in the deleted entry, and then actually overwrite those
+ bytes with 0s
+ """
+
+ if not self.addrMap[addr160].chainIndex==-2:
+ raise WalletAddressError('You can only delete imported addresses!')
+
+ overwriteLoc = self.addrMap[addr160].walletByteLoc - 21
+ overwriteLen = 20 + self.pybtcaddrSize - 2
+
+ overwriteBin = ''
+ overwriteBin += int_to_binary(WLT_DATATYPE_DELETED, widthBytes=1)
+ overwriteBin += int_to_binary(overwriteLen, widthBytes=2)
+ overwriteBin += '\x00'*overwriteLen
+
+ self.walletFileSafeUpdate([[WLT_UPDATE_MODIFY, overwriteLoc, overwriteBin]])
+
+ # IMPORTANT: we need to update the wallet structures to reflect the
+ # new state of the wallet. This will actually be easiest
+ # if we just "forget" the current wallet state and re-read
+ # the wallet from file
+ wltPath = self.walletPath
+ self.readWalletFile(wltPath, doScanNow=True)
+
+
+ #############################################################################
+ def importExternalAddressData(self, privKey=None, privChk=None, \
+ pubKey=None, pubChk=None, \
+ addr20=None, addrChk=None, \
+ firstTime=UINT32_MAX, firstBlk=UINT32_MAX, \
+ lastTime=0, lastBlk=0):
+ """
+ This wallet fully supports importing external keys, even though it is
+ a deterministic wallet: determinism only adds keys to the pool based
+ on the address-chain, but there's nothing wrong with adding new keys
+ not on the chain.
+
+ We don't know when this address was created, so we have to set its
+ first/last-seen times to 0, to make sure we search the whole blockchain
+ for tx related to it. This data will be updated later after we've done
+ the search and know for sure when it is "relevant".
+ (alternatively, if you know it's first-seen time for some reason, you
+ can supply it as an input, but this seems rare: we don't want to get it
+ wrong or we could end up missing wallet-relevant transactions)
+
+ DO NOT CALL FROM A BDM THREAD FUNCTION. IT MAY DEADLOCK.
+ """
+
+ if self.calledFromBDM:
+ LOGERROR('Called importExternalAddressData() from BDM method!')
+ LOGERROR('Don\'t do this!')
+ return ''
+
+ if not privKey and not self.watchingOnly:
+ LOGERROR('')
+ LOGERROR('This wallet is strictly for addresses that you')
+ LOGERROR('own. You cannot import addresses without the')
+ LOGERROR('the associated private key. Instead, use a')
+ LOGERROR('watching-only wallet to import this address.')
+ LOGERROR('(actually, this is currently, completely disabled)')
+ raise WalletAddressError('Cannot import non-private-key addresses')
+
+
+
+ # First do all the necessary type conversions and error corrections
+ computedPubKey = None
+ computedAddr20 = None
+ if privKey:
+ if isinstance(privKey, str):
+ privKey = SecureBinaryData(privKey)
+
+ if privChk:
+ privKey = SecureBinaryData(verifyChecksum(privKey.toBinStr(), privChk))
+
+ computedPubkey = CryptoECDSA().ComputePublicKey(privKey)
+ computedAddr20 = convertKeyDataToAddress(pubKey=computedPubkey)
+
+ # If public key is provided, we prep it so we can verify Pub/Priv match
+ if pubKey:
+ if isinstance(pubKey, str):
+ pubKey = SecureBinaryData(pubKey)
+ if pubChk:
+ pubKey = SecureBinaryData(verifyChecksum(pubKey.toBinStr(), pubChk))
+
+ if not computedAddr20:
+ computedAddr20 = convertKeyDataToAddress(pubKey=pubKey)
+
+ # The 20-byte address (pubkey hash160) should always be a python string
+ if addr20:
+ if not isinstance(pubKey, str):
+ addr20 = addr20.toBinStr()
+ if addrChk:
+ addr20 = verifyChecksum(addr20, addrChk)
+
+
+ # Now a few sanity checks
+ if self.addrMap.has_key(addr20):
+ LOGWARN('This address is already in your wallet!')
+ return
+
+ #if pubKey and not computedPubkey==pubKey:
+ #raise ECDSA_Error('Private and public keys to be imported do not match!')
+ #if addr20 and not computedAddr20==addr20:
+ #raise ECDSA_Error('Supplied address hash does not match key data!')
+
+ addr20 = computedAddr20
+
+ if self.addrMap.has_key(addr20):
+ return None
+
+ # If a private key is supplied and this wallet is encrypted&locked, then
+ # we have no way to secure the private key without unlocking the wallet.
+ if self.useEncryption and privKey and not self.kdfKey:
+ raise WalletLockError('Cannot import private key when wallet is locked!')
+
+
+ if privKey:
+ # For priv key, lots of extra encryption and verification options
+ newAddr = PyBtcAddress().createFromPlainKeyData( addr160=addr20, \
+ plainPrivKey=privKey, publicKey65=computedPubkey, \
+ willBeEncr=self.useEncryption, \
+ generateIVIfNecessary=self.useEncryption, \
+ skipCheck=True, skipPubCompute=True)
+ if self.useEncryption:
+ newAddr.lock(self.kdfKey)
+ newAddr.unlock(self.kdfKey)
+ elif pubKey:
+ securePubKey = SecureBinaryData(pubKey)
+ newAddr = PyBtcAddress().createFromPublicKeyData(securePubKey)
+ else:
+ newAddr = PyBtcAddress().createFromPublicKeyHash160(addr20)
+
+
+ newAddr.chaincode = SecureBinaryData('\xff'*32)
+ newAddr.chainIndex = -2
+ newAddr.timeRange = [firstTime, lastTime]
+ newAddr.blkRange = [firstBlk, lastBlk ]
+ #newAddr.binInitVect16 = SecureBinaryData().GenerateRandom(16)
+ newAddr160 = newAddr.getAddr160()
+
+ newDataLoc = self.walletFileSafeUpdate( \
+ [[WLT_UPDATE_ADD, WLT_DATATYPE_KEYDATA, newAddr160, newAddr]])
+ self.addrMap[newAddr160] = newAddr.copy()
+ self.addrMap[newAddr160].walletByteLoc = newDataLoc[0] + 21
+ self.linearAddr160List.append(newAddr160)
+ if self.useEncryption and self.kdfKey:
+ self.addrMap[newAddr160].lock(self.kdfKey)
+ if not self.isLocked:
+ self.addrMap[newAddr160].unlock(self.kdfKey)
+
+ self.cppWallet.addScrAddress_5_(Hash160ToScrAddr(newAddr160), \
+ firstTime, firstBlk, lastTime, lastBlk)
+
+ # The following line MAY deadlock if this method is called from the BDM
+ # thread. Do not write any BDM methods that calls this method!
+ TheBDM.registerImportedScrAddr(Hash160ToScrAddr(newAddr160),
+ firstTime, firstBlk, lastTime, lastBlk)
+
+
+ return newAddr160
+
+
+ #############################################################################
   def bulkImportAddresses(self, textBlock, privKeyEndian=BIGENDIAN, \
                           sepList=":;'[]()=-_*&^%$#@!,./?\n"):
      """
      (UNIMPLEMENTED STUB -- currently a no-op; see note below.)

      Attempts to import plaintext key data stored in a file. This method
      expects all data to be in hex or Base58:

         20 bytes / 40 hex chars -- public key hashes
         25 bytes / 50 hex chars -- full binary addresses
         65 bytes / 130 hex chars -- public key
         32 bytes / 64 hex chars -- private key

         33 or 34 Base58 chars -- address strings
         50 to 52 Base58 chars -- base58-encoded private key

      Since this is python, I don't have to require any particular format:
      I can pretty easily break apart the entire file into individual strings,
      search for addresses and public keys, then, search for private keys that
      correspond to that data. Obviously, simpler is better, but as long as
      the data is encoded as in the above list and separated by whitespace or
      punctuation, this method should succeed.

      We must throw an error if this is NOT a watching-only address and we
      find an address without a private key. We will need to create a
      separate watching-only wallet in order to import these keys.

      TODO: will finish this later
      """

      # NOTE(review): everything below is an abandoned draft kept as an
      # inert bare-string literal -- it is never executed and even contains
      # syntax that would not compile (e.g. the dangling 'DTYPES.'
      # attribute and empty 'if' bodies). The method itself does nothing.
      """
      STUB: (AGAIN) I just can't make this work out to be as stupid-proof
            as I originally planned. I'll have to put it on hold.
      self.__init__()

      newfile = open(filename,'rb')
      newdata = newfile.read()
      newfile.close()

      # Change all punctuation to the same char so split() works easier
      for ch in sepList:
         newdata.replace(ch, ' ')

      newdata = newdata.split()
      hexChars = '01234567890abcdef'
      b58Chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
      DATATYPES = enum( 'UNKNOWN', \
                        'Addr_Hex_20', \
                        'Addr_B58_25', \
                        'PubX_Hex_32', \
                        'PubY_Hex_32', \
                        'PubK_Hex_65', \
                        'Priv_Hex_32', \
                        'Priv_Hex_36', \
                        'Priv_Hex_37', \
                        'Priv_B58_32', \
                        'Priv_B58_37', \
                        'Priv_MiniPriv', \
                        'PubK_Hex_33_Compressed', \
                        'Priv_Hex_33_Compressed')

      DTYPES = enum('Unknown', 'Hash160', 'PubKey', 'PrivKey', 'Byte32', 'Byte33')


      lastAddr = None
      lastPubK = None
      lastPriv = None
      for theStr in newdata:
         if len(theStr)<20:
            continue

         hexCount = sum([1 if c in hexChars else 0 for c in theStr])
         b58Count = sum([1 if c in b58Chars else 0 for c in theStr])
         canBeHex = hexCount==len(theStr)
         canBeB58 = b58Count==len(theStr)
         isHex = canBeHex
         isB58 = canBeB58 and not canBeHex
         isStr = not isHex and not isB58

         dataAndType = [DTYPES.Unknown, '']
         if isHex:
            binData = hex_to_binary(theStr)
            sz = len(binData)

            if sz==20:
               dataAndType = [DTYPES.Hash160, binData]
            elif sz==25:
               dataAndType = [DTYPES.Hash160, binData[1:21]]
            elif sz==32:
               dataAndType = [DTYPES., binData[1:21]]
         elif isB58:
            binData = base58_to_binary(theStr)
            sz = len(binData)


         if isHex and sz==40:
         elif isHex and sz==50:
            dataAndType = [DTYPES.Hash160, hex_to_binary(theStr)[1:21]]
         elif isB58 and sz>=31 and sz<=35:
            dataAndType = [DTYPES.Hash160, addrStr_to_hash160(theStr)]
         elif isHex is sz==130:
            dataAndType = [DTYPES.PubKey, hex_to_binary(theStr)]
         elif isHex is sz==128:
            dataAndType = [DTYPES.PubKey, '\x04'+hex_to_binary(theStr)]
         elif isHex is sz==128:



      potentialKey = SecureBinaryData('\x04' + piece)
      isValid = CryptoECDSA().VerifyPublicKeyValid(potentialKey)
      """
      pass
+
+
+
+
+
+ #############################################################################
+ def checkIfRescanRequired(self):
+ """
+ Returns true is we have to go back to disk/mmap and rescan more than two
+ weeks worth of blocks
+
+ DO NOT CALL FROM A BDM METHOD. Instead, call directly:
+ self.bdm.numBlocksToRescan(pywlt.cppWallet) > 2016
+ """
+ if self.calledFromBDM:
+ LOGERROR('Called checkIfRescanRequired() from BDM method!')
+ LOGERROR('Don\'t do this!')
+
+ if TheBDM.getBDMState()=='BlockchainReady':
+ return (TheBDM.numBlocksToRescan(self.cppWallet) > 2016)
+ else:
+ return False
+
+
+
+ #############################################################################
   def signTxDistProposal(self, txdp, hashcode=1):
      """
      Sign every input of the tx-distribution-proposal (txdp) that this
      wallet holds a private key for, writing the signatures (plus public
      keys / P2SH scripts where required) into txdp.signatures in-place.

      txdp     -- proposal object exposing pytxObj, scriptTypes,
                  inScrAddrList, txOutScripts, p2shScripts, signatures
      hashcode -- only SIGHASH_ALL (1) is supported; anything else aborts

      Raises WalletLockError if the wallet is locked and no kdf key is
      available; re-locks the wallet when signing finishes.

      NOTE(review): this hunk appears truncated -- the method's tail (the
      lines immediately after self.lock() below are spliced with text from
      an unrelated unlock routine) should be recovered from upstream.
      """
      if not hashcode==1:
         LOGERROR('hashcode!=1 is not supported at this time!')
         return

      # If the wallet is locked, we better bail now
      if self.isLocked is True and self.kdfKey is None:
         raise WalletLockError('Cannot sign tx without unlocking wallet')

      # Collect (addrObj, inputIndex, sigIndex) for each signable input
      numInputs = len(txdp.pytxObj.inputs)
      wltAddr = []
      for index,txin in enumerate(txdp.pytxObj.inputs):
         scrType = txdp.scriptTypes[index]
         if scrType in CPP_TXOUT_STDSINGLESIG:
            scrAddr = txdp.inScrAddrList[index]
            addr160 = scrAddr[1:]
            if self.hasAddr(addr160) and self.addrMap[addr160].hasPrivKey():
               wltAddr.append( (self.addrMap[addr160], index, 0))
         elif scrType==CPP_TXOUT_MULTISIG:
            # Basically the same check but multiple addresses to consider
            # STUB -- this branch has never been tested
            addrList = getMultisigScriptInfo(txdp.txOutScripts[index])[2]
            for addrIdx, addr in enumerate(addrList):
               if self.hasAddr(addr) and self.addrMap[addr].hasPrivKey():
                  wltAddr.append( (self.addrMap[addr], index, addrIdx) )
                  break

      # WltAddr now contains a list of every input we can sign for, and the
      # PyBtcAddress object that can be used to sign it. Let's do it.
      numMyAddr = len(wltAddr)
      LOGDEBUG('Total number of inputs in transaction: %d', numInputs)
      LOGDEBUG('Number of inputs that you can sign for: %d', numMyAddr)


      # Unlock the wallet if necessary, sign inputs
      maxChainIndex = -1
      for addrObj,idx, sigIdx in wltAddr:
         maxChainIndex = max(maxChainIndex, addrObj.chainIndex)
         if addrObj.isLocked:
            if self.kdfKey:
               if addrObj.createPrivKeyNextUnlock:
                  # Private key must still be derived down the chain:
                  # requires a full wallet unlock, not just this address
                  self.unlock(self.kdfKey)
               else:
                  addrObj.unlock(self.kdfKey)
            else:
               self.lock()
               raise WalletLockError('Cannot sign tx without unlocking wallet')

         if not addrObj.hasPubKey():
            # Make sure the public key is available for this address
            addrObj.binPublicKey65 = \
               CryptoECDSA().ComputePublicKey(addrObj.binPrivKey32_Plain)

         # Copy the script, blank out all other scripts (assume hashcode==1)
         txCopy = PyTx().unserialize(txdp.pytxObj.serialize())
         for i in range(len(txCopy.inputs)):
            if not i==idx:
               txCopy.inputs[i].binScript = ''
            else:
               txCopy.inputs[i].binScript = txdp.txOutScripts[i]

         hashCode1 = int_to_binary(hashcode, widthBytes=1)
         hashCode4 = int_to_binary(hashcode, widthBytes=4)
         preHashMsg = txCopy.serialize() + hashCode4
         signature = addrObj.generateDERSignature(preHashMsg) + hashCode1

         # Now we attach a binary signature or full script, depending on the type
         p2shScript = txdp.p2shScripts[idx]
         p2shAppend = ''
         if len(p2shScript) > 0:
            LOGWARN('Signing for P2SH input')
            p2shAppend = serializeBytesWithPushData(p2shScript)

         scrType = txdp.scriptTypes[idx]
         if scrType in [CPP_TXOUT_STDPUBKEY33, CPP_TXOUT_STDPUBKEY65]:
            # Only need the signature to complete coinbase TxOut
            serSignature = serializeBytesWithPushData(signature)
            txdp.signatures[idx][0] = serSignature + p2shAppend
         elif scrType==CPP_TXOUT_STDHASH160:
            # Gotta include the public key, too, for standard TxOuts
            pubkey = addrObj.binPublicKey65.toBinStr()
            serSig = serializeBytesWithPushData(signature)
            serPubKey = serializeBytesWithPushData(pubkey)
            txdp.signatures[idx][0] = serSig + serPubKey + p2shAppend
         elif txdp.scriptTypes[idx]==TXOUT_SCRIPT_MULTISIG:
            # We attach just the sig for multi-sig transactions
            # NOTE(review): 'serSig' is undefined on this path (it is only
            # bound in the STDHASH160 branch above) -- this almost certainly
            # should be 'serSignature'. Confirm before relying on multisig
            # signing; the branch is marked untested anyway.
            txdp.signatures[idx][sigIdx] = serSig
         else:
            LOGERROR('Unknown txOut script type')

      self.lock()
+
+ prevHighestIndex = self.highestUsedChainIndex
+ if prevHighestIndex 0 and addrObjPrev.chainIndex > -1:
+ addrObj.createPrivKeyNextUnlock_IVandKey[0] = \
+ addrObjPrev.binInitVect16.copy()
+ addrObj.createPrivKeyNextUnlock_IVandKey[1] = \
+ addrObjPrev.binPrivKey32_Encr.copy()
+
+ addrObj.createPrivKeyNextUnlock_ChainDepth = ChainDepth
+
+ addrObj.unlock(self.kdfKey)
+ if addrObj.chainIndex > -1: addrObjPrev = addrObj
+
+ if needToSaveAddrAfterUnlock:
+ updateLoc = addrObj.walletByteLoc
+ self.walletFileSafeUpdate( [[WLT_UPDATE_MODIFY,
+ addrObj.walletByteLoc,
+ addrObj.serialize()]])
+
+ self.isLocked = False
+ LOGDEBUG('Unlock succeeded: %s', self.uniqueIDB58)
+
+ ############################################################################
+ def lock(self, Progress=emptyFunc):
+ """
+ We assume that we have already set all encryption parameters (such as
+ IVs for each key) and thus all we need to do is call the "lock" method
+ on each PyBtcAddress object.
+
+ If wallet is unlocked, try to re-lock addresses, regardless of whether
+ we have a kdfKey or not. In some circumstances (such as when the addrs
+ have never been locked before) we will need the key to encrypt them.
+ However, in most cases, the encrypted versions are already available
+ and the PyBtcAddress objects can destroy the plaintext keys without
+ ever needing access to the encryption keys.
+
+ ANY METHOD THAT CALLS THIS MUST CATCH WALLETLOCKERRORS UNLESS YOU ARE
+ POSITIVE THAT THE KEYS HAVE ALREADY BEEN ENCRYPTED BEFORE, OR ARE
+ ALREADY SITTING IN THE ENCRYPTED WALLET FILE. PyBtcAddress objects
+ were designed to do this, but in case of a bug, you don't want the
+ program crashing with money-bearing private keys sitting in memory only.
+
+ TODO: If things like IVs are not set properly, we should implement
+ a way to check for this, correct it, and update the wallet
+ file if necessary
+ """
+
+ # Wallet is unlocked, will try to re-lock addresses, regardless of whether
+ # we have a kdfKey or not. If a key is required, we will throw a
+ # WalletLockError, and the caller can get the passphrase from the user,
+ # unlock the wallet, then try locking again.
+ # NOTE: If we don't have kdfKey, it is set to None, which is the default
+ # input for PyBtcAddress::lock for "I don't have it". In most
+ # cases, it is actually possible to lock the wallet without the
+ # kdfKey because we saved the encrypted versions before unlocking
+ LOGDEBUG('Attempting to lock wallet: %s', self.uniqueIDB58)
+ i=1
+ nAddr = len(self.addrMap)
+ try:
+ for addr160,addrObj in self.addrMap.iteritems():
+ Progress(i, nAddr)
+ i = i +1
+
+ self.addrMap[addr160].lock(self.kdfKey)
+
+ if self.kdfKey:
+ self.kdfKey.destroy()
+ self.kdfKey = None
+ self.isLocked = True
+ except WalletLockError:
+ LOGERROR('Locking wallet requires encryption key. This error')
+ LOGERROR('Usually occurs on newly-encrypted wallets that have')
+ LOGERROR('never been encrypted before.')
+ raise WalletLockError('Unlock with passphrase before locking again')
+ LOGDEBUG('Wallet locked: %s', self.uniqueIDB58)
+
+ #############################################################################
+ def getAddrListSortedByChainIndex(self, withRoot=False):
+ """ Returns Addr160 list """
+ addrList = []
+ for addr160 in self.linearAddr160List:
+ addr=self.addrMap[addr160]
+ addrList.append( [addr.chainIndex, addr160, addr] )
+
+ addrList.sort(key=lambda x: x[0])
+ return addrList
+
+ #############################################################################
+ def getAddrList(self):
+ """ Returns list of PyBtcAddress objects """
+ addrList = []
+ for addr160,addrObj in self.addrMap.iteritems():
+ if addr160=='ROOT':
+ continue
+ # I assume these will be references, not copies
+ addrList.append( addrObj )
+ return addrList
+
+
+ #############################################################################
+ def getLinearAddrList(self, withImported=True, withAddrPool=False):
+ """
+ Retrieves a list of addresses, by hash, in the order they
+ appear in the wallet file. Can ignore the imported addresses
+ to get only chained addresses, if necessary.
+
+ I could do this with one list comprehension, but it would be long.
+ I'm resisting the urge...
+ """
+ addrList = []
+ for a160 in self.linearAddr160List:
+ addr = self.addrMap[a160]
+ if not a160=='ROOT' and (withImported or addr.chainIndex>=0):
+ # Either we want imported addresses, or this isn't one
+ if (withAddrPool or addr.chainIndex<=self.highestUsedChainIndex):
+ addrList.append(addr)
+
+ return addrList
+
+
+ #############################################################################
   def getAddress160ByChainIndex(self, desiredIdx):
      """
      Map a chain index back to its 20-byte address hash via chainIndexMap.

      It should be safe to assume that if the index is less than the highest
      computed, it will be in the chainIndexMap, but I don't like making such
      assumptions. Perhaps something went wrong with the wallet, or it was
      manually reconstructed and has holes in the chain. We will regenerate
      addresses up to that point, if necessary (but nothing past the value
      self.lastComputedChainIndex.
      """
      # Out-of-range indices are an error rather than a trigger for address
      # generation -- see the comment below about the removed fill option.
      if desiredIdx>self.lastComputedChainIndex or desiredIdx<0:
         # I removed the option for fillPoolIfNecessary, because of the risk
         # that a bug may lead to generation of billions of addresses, which
         # would saturate the system's resources and fill the HDD.
         raise WalletAddressError('Chain index is out of range')


      # Fast path: index already in the map (Python 2 dict.has_key)
      if self.chainIndexMap.has_key(desiredIdx):
         return self.chainIndexMap[desiredIdx]
      else:
         # Somehow the address isn't here, even though it is less than the
         # last computed index
         closestIdx = 0
         # NOTE(review): the line below is corrupted in this hunk -- the
         # 'closestIdx' comparison has been spliced together with text from
         # an unrelated log-building method. Recover the original fallback
         # loop from upstream before using this slow path.
         for idx,addr160 in self.chainIndexMap.iteritems():
            if closestIdx- Building log file... '
+ Progress(self.UIreport)
+
+ if errorCode < 0:
+ if errorCode == -1:
+ errorstr = \
+ 'ERROR: Invalid path, or file is not a valid Armory wallet\r\n'
+ elif errorCode == -2:
+ errorstr = \
+ 'ERROR: file I/O failure. Do you have proper credentials?\r\n'
+ elif errorCode == -3:
+ errorstr = \
+ 'ERROR: This wallet file is for another network/blockchain\r\n'
+ elif errorCode == -4:
+ errorstr = \
+ 'ERROR: invalid or missing passphrase for encrypted wallet\r\n'
+ elif errorCode == -10:
+ errorstr = 'ERROR: no kdf parameters available\r\n'
+ elif errorCode == -12:
+ errorstr = 'ERROR: failed to unlock root key\r\n'
+
+ self.strOutput.append(' %s' % (errorstr))
+
+ self.UIreport = self.UIreport + errorstr
+ Progress(self.UIreport)
+ return self.FinalizeLog(errorCode, Progress, returnError)
+
+
+ if returnError == 'Dict':
+ errors = {}
+ errors['byteError'] = self.byteError
+ errors['brokenSequence'] = self.brokenSequence
+ errors['sequenceGaps'] = self.sequenceGaps
+ errors['forkedPublicKeyChain'] = self.forkedPublicKeyChain
+ errors['chainCodeCorruption'] = self.chainCodeCorruption
+ errors['invalidPubKey'] = self.invalidPubKey
+ errors['missingPubKey'] = self.missingPubKey
+ errors['hashValMismatch'] = self.hashValMismatch
+ errors['unmatchedPair'] = self.unmatchedPair
+ errors['misc'] = self.misc
+ errors['importedErr'] = self.importedErr
+ errors['negativeImports'] = self.negativeImports
+ errors['nErrors'] = nErrors
+ errors['privMult'] = self.privKeyMultipliers
+
+ return errors
+
+
+ if self.newwalletPath != None:
+ self.LogPath = self.newwalletPath + ".log"
+ else:
+ self.LogPath = self.WalletPath + ".log"
+ basename = os.path.basename(self.WalletPath)
+
+ if self.smode == RECOVERMODE.Check:
+ self.strOutput.append('Checking wallet %s (ID: %s) on %s \r\n' % \
+ ('\'' + self.labelName + '\'' \
+ if len(self.labelName) != 0 else basename, \
+ self.UID, ctime()))
+ else:
+ self.strOutput.append('Analyzing wallet %s (ID: %s) on %s \r\n' % \
+ ('\'' + self.labelName + '\'' if \
+ len(self.labelName) != 0 else basename, \
+ self.UID, ctime()))
+ self.strOutput.append('Using recovery mode: %d\r\n' % (self.smode))
+
+ if self.WO:
+ self.strOutput.append('Wallet is Watch Only\r\n')
+ else:
+ self.strOutput.append('Wallet contains private keys ')
+ if self.useEnc == 0:
+ self.strOutput.append('and doesn\'t use encryption\r\n')
+ else:
+ self.strOutput.append('and uses encryption\r\n')
+
+ # If all we have is these logs, should know num used, if avail
+ self.strOutput.append('Highest used index: %d\r\n' % self.highestUsed)
+
+
+ if self.smode == RECOVERMODE.Stripped and not self.WO:
+ self.strOutput.append(' Recovered root key and chaincode, stripped recovery done.')
+ return self.FinalizeLog(errorCode, Progress, returnError)
+
+ self.strOutput.append('The wallet file is %d bytes, of which %d bytes were read\r\n' % \
+ (self.fileSize, self.dataLastOffset))
+ self.strOutput.append('%d chain addresses, %d imported keys and %d comments were found\r\n' % \
+ (self.naddress, self.nImports, self.ncomments))
+
+ nErrors = 0
+ #### chained keys
+ self.strOutput.append('Found %d chained address entries\r\n' \
+ % (self.naddress))
+
+ if len(self.byteError) == 0:
+ self.strOutput.append('No byte errors were found in the wallet file\r\n')
+ else:
+ nErrors = nErrors + len(self.byteError)
+ self.strOutput.append('%d byte errors were found in the wallet file:\r\n' % (len(self.byteError)))
+ for i in range(0, len(self.byteError)):
+ self.strOutput.append(' chainIndex %s at file offset %s\r\n' \
+ % (self.byteError[i][0], self.byteError[i][1]))
+
+
+ if len(self.brokenSequence) == 0:
+ self.strOutput.append('All chained addresses were arranged sequentially in the wallet file\r\n')
+ else:
+ #nErrors = nErrors + len(self.brokenSequence)
+ self.strOutput.append('The following %d addresses were not arranged sequentially in the wallet file:\r\n' % \
+ (len(self.brokenSequence)))
+ for i in range(0, len(self.brokenSequence)):
+ self.strOutput.append(' chainIndex %s at file offset %s\r\n' % \
+ (self.brokenSequence[i][0], self.brokenSequence[i][1]))
+
+ if len(self.sequenceGaps) == 0:
+ self.strOutput.append('There are no gaps in the address chain\r\n')
+ else:
+ nErrors = nErrors + len(self.sequenceGaps)
+ self.strOutput.append('Found %d gaps in the address chain:\r\n' % \
+ (len(self.sequenceGaps)))
+ for i in range(0, len(self.sequenceGaps)):
+ self.strOutput.append(' from chainIndex %s to %s\r\n' % \
+ (self.sequenceGaps[i][0], self.sequenceGaps[i][1]))
+
+ if len(self.forkedPublicKeyChain) == 0:
+ self.strOutput.append('No chained address fork was found\r\n')
+ else:
+ nErrors = nErrors + len(self.forkedPublicKeyChain)
+ self.strOutput.append('Found %d forks within the address chain:\r\n' \
+ % (len(self.forkedPublicKeyChain)))
+ for i in range(0, len(self.forkedPublicKeyChain)):
+ self.strOutput.append(' at chainIndex %s, file offset %s\r\n' \
+ % (self.forkedPublicKeyChain[i][0], \
+ self.forkedPublicKeyChain[i][1]))
+
+ if len(self.chainCodeCorruption) == 0:
+ self.strOutput.append('No chaincode corruption was found\r\n')
+ else:
+ nErrors = nErrors + len(self.chainCodeCorruption)
+ self.strOutput.append(' \
+ Found %d instances of chaincode corruption:\r\n' % \
+ (len(self.chainCodeCorruption)))
+ for i in range(0, len(self.chainCodeCorruption)):
+ self.strOutput.append(' at chainIndex %s, file offset %s\r\n' % (self.chainCodeCorruption[i][0], \
+ self.chainCodeCorruption[i][1]))
+
+ if len(self.invalidPubKey) == 0:
+ self.strOutput.append('All chained public keys are valid EC points\r\n')
+ else:
+ nErrors = nErrors + len(self.invalidPubKey)
+ self.strOutput.append('%d chained public keys are invalid EC points:\r\n' % (len(self.invalidPubKey)))
+ for i in range(0, len(self.invalidPubKey)):
+ self.strOutput.append(' at chainIndex %s, file offset %s' % \
+ (self.invalidPubKey[i][0], \
+ self.invalidPubKey[i][1]))
+
+ if len(self.missingPubKey) == 0:
+ self.strOutput.append('No chained public key is missing\r\n')
+ else:
+ nErrors = nErrors + len(self.missingPubKey)
+ self.strOutput.append('%d chained public keys are missing:\r\n' % \
+ (len(self.missingPubKey)))
+ for i in range(0, len(self.missingPubKey)):
+ self.strOutput.append(' at chainIndex %s, file offset %s' % \
+ (self.missingPubKey[i][0], \
+ self.missingPubKey[i][1]))
+
+ if len(self.hashValMismatch) == 0:
+ self.strOutput.append('All entries were saved under their matching hashVal\r\n')
+ else:
+ nErrors = nErrors + len(self.hashValMismatch)
+ self.strOutput.append('%d address entries were saved under an erroneous hashVal:\r\n' % \
+ (len(self.hashValMismatch)))
+ for i in range(0, len(self.hashValMismatch)):
+ self.strOutput.append(' at chainIndex %s, file offset %s\r\n' \
+ % (self.hashValMismatch[i][0], \
+ self.hashValMismatch[i][1]))
+
+ if not self.WO:
+ if len(self.unmatchedPair) == 0:
+ self.strOutput.append('All chained public keys match their respective private keys\r\n')
+ else:
+ nErrors = nErrors + len(self.unmatchedPair)
+ self.strOutput.append('%d public keys do not match their respective private key:\r\n' % \
+ (len(self.unmatchedPair)))
+ for i in range(0, len(self.unmatchedPair)):
+ self.strOutput.append(' at chainIndex %s, file offset %s\r\n' \
+ % (self.unmatchedPair[i][0],
+ self.unmatchedPair[i][1]))
+
+ if len(self.misc) > 0:
+ nErrors = nErrors + len(self.misc)
+ self.strOutput.append('%d miscalleneous errors were found:\r\n' % \
+ (len(self.misc)))
+ for i in range(0, len(self.misc)):
+ self.strOutput.append(' %s\r\n' % self.misc[i])
+
+ #### imported keys
+ self.strOutput.append('Found %d imported address entries\r\n' % \
+ (self.nImports))
+
+ if self.nImports > 0:
+ if len(self.importedErr) == 0:
+ self.strOutput.append('No errors were found within the imported address entries\r\n')
+ else:
+ nErrors = nErrors + len(self.importedErr)
+ self.strOutput.append('%d errors were found within the imported address entries:\r\n' % \
+ (len(self.importedErr)))
+ for i in range(0, len(self.importedErr)):
+ self.strOutput.append(' %s\r\n' % (self.importedErr[i]))
+
+ if len(self.privKeyMultipliers) > 0:
+ self.strOutput.append('Inconsistent private keys were found!\r\n')
+ self.strOutput.append('Logging Multipliers (no private key data):\r\n')
+
+ for i in range(0, len(self.privKeyMultipliers)):
+ self.strOutput.append(' %s\r\n' % (self.privKeyMultipliers[i]))
+
+ ####TODO: comments error log
+ self.strOutput.append('%d errors were found\r\n' % (nErrors))
+ #self.UIreport += '- %d errors were found ' % \
+ #( ' style="color: red;"' if nErrors else '', nErrors)
+ return self.FinalizeLog(errorCode, Progress, returnError)
+
+
+ ############################################################################
+ def FinalizeLog(self, errorcode, Progress, returnError=False):
+
+ self.EndLog = ''
+
+ if errorcode < 0:
+ self.strOutput.append( \
+ 'Recovery failed: error code %d\r\n\r\n\r\n' % (errorcode))
+
+ self.EndLog = '- Recovery failed: error code %d ' % \
+ (errorcode)
+ Progress(self.UIreport + self.EndLog)
+ return errorcode
+ else:
+
+ self.strOutput.append('Recovery done\r\n\r\n\r\n')
+ self.EndLog = self.EndLog + '- Recovery done '
+ if self.newwalletPath: self.EndLog = self.EndLog + \
+ ' Recovered wallet saved at: - %s ' % \
+ (self.newwalletPath)
+ Progress(self.UIreport + self.EndLog)
+
+ self.strOutput.append('\r\n\r\n\r\n')
+
+ if not returnError:
+ self.EndLog = self.EndLog + ' Recovery log saved at: - %s ' \
+ % (self.LogPath)
+ Progress(self.UIreport + self.EndLog, True)
+
+ self.logfile = open(self.LogPath, 'ab')
+
+ for s in self.strOutput:
+ self.logfile.write(s)
+
+ self.logfile.close()
+
+ return errorcode
+ else:
+ return [errorcode, self.strOutput]
+
+   ############################################################################
+   def RecoverWallet(self, WalletPath, Passphrase=None, Mode=RECOVERMODE.Bare,
+                     returnError=False, Progress=emptyFunc):
+      """
+      Convenience wrapper around ProcessWallet for a wallet file path.
+      async=True is presumably consumed by the @AllowAsync decorator on
+      ProcessWallet to run the job in the background -- confirm.
+      """
+      # NOTE(review): 'async' as an argument name is Python-2-only syntax; it
+      # became a reserved keyword in Python 3.7.
+      return self.ProcessWallet(WalletPath, None, Passphrase, Mode, None,
+                                returnError, async=True, Progress=Progress)
+
+   ############################################################################
+   @AllowAsync
+   def ProcessWallet(self, WalletPath=None, Wallet=None, Passphrase=None,
+                     Mode=RECOVERMODE.Stripped, prgAt=None,
+                     returnError=False, Progress=emptyFunc):
+      """
+      Scan an Armory wallet file entry by entry and verify/rebuild it
+      according to Mode (Stripped, Bare, Full, Check or Meta).  Progress is
+      a callable used to report status to the UI; a falsy return from it is
+      treated as a user abort (this method then returns 0).
+
+      Returns an error code via BuildLogFile, 0 on abort, or -- in Meta
+      mode -- the dictionary of comments/labels collected from the file.
+      """
+
+      self.__init__()
+
+      if not WalletPath:
+         if not Wallet: return -1
+         WalletPath = Wallet.walletPath
+
+      self.WalletPath = WalletPath
+
+      RecoveredWallet = None
+      SecurePassphrase = None
+
+      self.naddress = 0
+      #holds address chain sequentially, ordered by chainIndex, as lists:
+      #[addrEntry, hashVal, naddress, byteLocation, rawData]
+      #validChainDict uses the same list format, and is used to hold computed
+      #valid chain address entries
+      addrDict = {}
+      validChainDict = {}
+
+      self.nImports = 0
+      #holds imported address, by order of apparition, as lists:
+      #[addrEntry, hashVal, byteLocation, rawData]
+      importedDict = {}
+
+      self.ncomments = 0
+      #holds all comments entries, as lists: [rawData, hashVal, dtype]
+      commentDict = {}
+
+      #in meta mode, the wallet's short and long labels are saved in entries
+      #shortLabel and longLabel, pointing to a single str object
+
+      rmode = Mode
+      self.smode = Mode
+      if Mode == RECOVERMODE.Meta:
+         self.WO = True
+
+      self.fileSize=0
+      if not os.path.exists(WalletPath):
+         return self.BuildLogFile(-1, Progress, returnError)
+      else: self.fileSize = os.path.getsize(WalletPath)
+
+      toRecover = PyBtcWallet()
+      toRecover.walletPath = WalletPath
+
+      #consistency check
+      try:
+         toRecover.doWalletFileConsistencyCheck()
+      except:
+         #I expect 99% of errors raised here would be by Python's "os" module
+         #failing an I/O operations, mainly for lack of credentials.
+         LOGEXCEPT('')
+         return self.BuildLogFile(-2, Progress, returnError)
+
+      #fetch wallet content
+      wltfile = open(WalletPath, 'rb')
+      wltdata = BinaryUnpacker(wltfile.read())
+      wltfile.close()
+
+      #unpack header
+      try:
+         returned = toRecover.unpackHeader(wltdata)
+      except:
+         LOGEXCEPT('')
+         #Raises here come from invalid header parsing, meaning the file isn't
+         #an Armory wallet to begin with, or the header is fubar
+         return self.BuildLogFile(-1, Progress, returnError)
+
+      self.UID = toRecover.uniqueIDB58
+      self.labelName = toRecover.labelName
+      self.highestUsed = toRecover.highestUsedChainIndex
+      #TODO: try to salvage broken header
+      # compare uniqueIDB58 with recovered wallet
+
+      self.UIreport = 'Analyzing wallet: %s ' % (toRecover.labelName \
+                        if len(toRecover.labelName) != 0 \
+                        else os.path.basename(WalletPath))
+      Progress(self.UIreport)
+
+      if returned < 0: return self.BuildLogFile(-3, Progress, returnError)
+
+      self.useEnc=0
+      rootAddr = toRecover.addrMap['ROOT']
+
+      #check for private keys (watch only?)
+      if toRecover.watchingOnly is True:
+         self.WO = True
+
+      if not self.WO:
+         #check if wallet is encrypted
+         if toRecover.isLocked==True and rmode != RECOVERMODE.Meta:
+            '''
+            Passphrase can one of be 3 things:
+            1) str
+            2) SecureBinaryData
+            3) a function that will return the passphrase (think user prompt)
+            '''
+            if isinstance(Passphrase, str):
+               SecurePassphrase = SecureBinaryData(Passphrase)
+               Passphrase = ''
+            elif isinstance(Passphrase, SecureBinaryData):
+               SecurePassphrase = Passphrase.copy()
+            elif hasattr(Passphrase, '__call__'):
+               getPassphrase = Passphrase(toRecover)
+
+               if isinstance(getPassphrase, SecureBinaryData):
+                  SecurePassphrase = getPassphrase.copy()
+                  getPassphrase.destroy()
+               else:
+                  if rmode==RECOVERMODE.Check:
+                     self.WO = True
+                  else:
+                     return self.BuildLogFile(-4, Progress, returnError)
+            else:
+               if rmode==RECOVERMODE.Check:
+                  self.WO = True
+               else:
+                  return self.BuildLogFile(-4, Progress, returnError)
+
+         #if the wallet uses encryption, unlock ROOT and verify it
+         if toRecover.isLocked and not self.WO:
+            self.useEnc=1
+            if not toRecover.kdf:
+               SecurePassphrase.destroy()
+               return self.BuildLogFile(-10, Progress, returnError)
+
+            secureKdfOutput = toRecover.kdf.DeriveKey(SecurePassphrase)
+
+            if not toRecover.verifyEncryptionKey(secureKdfOutput):
+               SecurePassphrase.destroy()
+               secureKdfOutput.destroy()
+               return self.BuildLogFile(-4, Progress, returnError)
+
+            #DlgUnlockWallet may have filled kdfKey. Since this code can be
+            #called with no UI and just the passphrase, gotta make sure this
+            #member is cleaned up before setting it
+            if isinstance(toRecover.kdfKey, SecureBinaryData):
+               toRecover.kdfKey.destroy()
+            toRecover.kdfKey = secureKdfOutput
+
+            try:
+               rootAddr.unlock(toRecover.kdfKey)
+            except:
+               LOGEXCEPT('')
+               SecurePassphrase.destroy()
+               return self.BuildLogFile(-12, Progress, returnError)
+      else:
+         SecurePassphrase = None
+
+      #stripped recovery, we're done
+      if rmode == RECOVERMODE.Stripped:
+         RecoveredWallet = self.createRecoveredWallet(toRecover, rootAddr, \
+                                     SecurePassphrase, Progress, returnError)
+         rootAddr.lock()
+         if SecurePassphrase: SecurePassphrase.destroy()
+
+         if not isinstance(RecoveredWallet, PyBtcWallet):
+            return RecoveredWallet
+
+         if isinstance(toRecover.kdfKey, SecureBinaryData):
+            toRecover.kdfKey.destroy()
+         if isinstance(RecoveredWallet.kdfKey, SecureBinaryData):
+            RecoveredWallet.kdfKey.destroy()
+
+         #stripped recovery, we are done
+         return self.BuildLogFile(1, Progress, returnError)
+
+      if rmode == RECOVERMODE.Meta:
+         commentDict["shortLabel"] = toRecover.labelName
+         commentDict["longLabel"] = toRecover.labelDescr
+
+      '''
+      address entries may not be saved sequentially. To check the address
+      chain is valid, all addresses will be unserialized and saved by
+      chainIndex in addrDict. Then all addresses will be checked for
+      consistency and proper chaining. Imported private keys and comments
+      will be added at the tail of the file.
+      '''
+
+      UIupdate = ""
+      self.misc = [] #miscellaneous errors
+      self.rawError = [] #raw binary errors
+
+      if prgAt:
+         prgAt_in = prgAt[0]
+         prgAt[0] = prgAt_in +prgAt[1]*0.01
+
+
+      #move on to wallet body
+      toRecover.lastComputedChainIndex = -UINT32_MAX
+      toRecover.lastComputedChainAddr160 = None
+      while wltdata.getRemainingSize()>0:
+         byteLocation = wltdata.getPosition()
+
+
+         UIupdate = '- Reading wallet: %0.1f/%0.1f kB ' % \
+            (float(byteLocation)/KILOBYTE, float(self.fileSize)/KILOBYTE)
+         if Progress(self.UIreport + UIupdate) == 0:
+            if SecurePassphrase: SecurePassphrase.destroy()
+            if toRecover.kdfKey: toRecover.kdfKey.destroy()
+            rootAddr.lock()
+            return 0
+
+         newAddr = None
+         try:
+            dtype, hashVal, rawData = toRecover.unpackNextEntry(wltdata)
+         except NotImplementedError:
+            self.misc.append('Found OPEVAL data entry at offest: %d' % \
+                             (byteLocation))
+            pass
+         except:
+            LOGEXCEPT('')
+            #Error in the binary file content. Try to skip an entry size amount
+            #of bytes to find a valid entry.
+            self.rawError.append('Raw binary error found at offset: %d' \
+                                 % (byteLocation))
+
+            dtype, hashVal, rawData, dataList = self.LookForFurtherEntry( \
+                                                   wltdata, byteLocation)
+
+            if dtype is None:
+               #could not find anymore valid data
+               self.rawError.append('Could not find anymore valid data past \
+                                     offset: %d' % (byteLocation))
+               break
+
+            byteLocation = dataList[1]
+            self.rawError.append('   Found a valid data entry at offset: %d' \
+                                 % (byteLocation))
+
+            if dataList[0] == 0:
+               #found an address entry, but it has checksum errors
+               newAddr = dataList[2]
+
+         if dtype==WLT_DATATYPE_KEYDATA:
+            if rmode != RECOVERMODE.Meta:
+               if newAddr is None:
+                  newAddr = PyBtcAddress()
+                  try:
+                     newAddr.unserialize(rawData)
+                  except:
+                     LOGEXCEPT('')
+                     #unserialize error, try to recover the entry
+                     self.rawError.append( \
+                        '   Found checksum errors in address entry starting at offset: %d' \
+                        % (byteLocation))
+
+                     try:
+                        newAddr, chksumError = \
+                           self.addrEntry_unserialize_recover(rawData)
+                        self.rawError.append('   Recovered damaged entry')
+                     except:
+                        LOGEXCEPT('')
+                        #failed to recover the entry
+                        self.rawError.append( \
+                           '   Could not recover damaged entry')
+                        newAddr = None
+
+               if newAddr is not None:
+                  newAddr.walletByteLoc = byteLocation + 21
+
+                  if newAddr.useEncryption:
+                     newAddr.isLocked = True
+
+                  #save address entry count in the file, to check
+                  #for entry sequence
+                  if newAddr.chainIndex > -2 :
+                     addrDict[newAddr.chainIndex] = \
+                        [newAddr, hashVal, self.naddress, byteLocation, rawData]
+                     self.naddress = self.naddress +1
+                  else:
+                     importedDict[self.nImports] = \
+                        [newAddr, hashVal, byteLocation, rawData]
+                     self.nImports = self.nImports +1
+
+            else: self.naddress = self.naddress +1
+
+
+         elif dtype in (WLT_DATATYPE_ADDRCOMMENT, WLT_DATATYPE_TXCOMMENT):
+            #if rmode > 2:
+            if rmode in [RECOVERMODE.Full, RECOVERMODE.Meta, RECOVERMODE.Check]:
+               commentDict[self.ncomments] = [rawData, hashVal, dtype]
+               self.ncomments = self.ncomments +1
+
+         elif dtype==WLT_DATATYPE_OPEVAL:
+            self.misc.append('Found OPEVAL data entry at offest: %d' % \
+                             (byteLocation))
+            pass
+         elif dtype==WLT_DATATYPE_DELETED:
+            pass
+         else:
+            self.misc.append('Found unknown data entry type at offset: %d' % \
+                             (byteLocation))
+            #TODO: try same trick as recovering from unpack errors?
+
+      self.dataLastOffset = wltdata.getPosition()
+      UIupdate = '- Reading wallet: %0.1f/%0.1f kB ' % \
+         (float(self.dataLastOffset)/KILOBYTE, float(self.fileSize)/KILOBYTE)
+      self.UIreport = self.UIreport + UIupdate
+
+      #verify the root address is derived from the root key
+      if not self.WO:
+         testroot = PyBtcAddress().createFromPlainKeyData( \
+                              rootAddr.binPrivKey32_Plain, None, None, \
+                              generateIVIfNecessary=True)
+         if rootAddr.addrStr20 != testroot.addrStr20:
+            self.rawError.append( \
+               '   root address was not derived from the root key')
+
+
+         #verify chainIndex 0 was derived from the root address
+         firstAddr = rootAddr.extendAddressChain(toRecover.kdfKey)
+         if firstAddr.addrStr20 != addrDict[0][0].addrStr20:
+            self.rawError.append('   chainIndex 0 was not derived from the \
+                                 root address')
+
+         testroot.lock()
+
+      if rmode != RECOVERMODE.Meta:
+         currSequence = addrDict[0][2]
+         chaincode = addrDict[0][0].chaincode.toHexStr()
+      else:
+         currSequence = None
+         chaincode = None
+         commentDict['naddress'] = self.naddress
+         self.naddress = 0
+         commentDict['ncomments'] = self.ncomments
+
+      if prgAt:
+         prgTotal = len(addrDict) + len(importedDict) + len(commentDict)
+
+
+
+      #chained key pairs. for rmode is 4, no need to skip this part,
+      #naddress will be 0
+      n=0
+      for i in addrDict:
+         entrylist = []
+         entrylist = list(addrDict[i])
+         newAddr = entrylist[0]
+         rawData = entrylist[4]
+         byteLocation = entrylist[3]
+
+         n = n+1
+         UIupdate = '- Processing address entries: %d/%d ' % \
+                     (n, self.naddress)
+         if Progress(self.UIreport + UIupdate) == 0:
+            if SecurePassphrase: SecurePassphrase.destroy()
+            if toRecover.kdfKey: toRecover.kdfKey.destroy()
+            rootAddr.lock()
+            return 0
+         if prgAt:
+            prgAt[0] = prgAt_in + (0.01 + 0.99*n/prgTotal)*prgAt[1]
+
+         # Fix byte errors in the address data
+         fixedAddrData = newAddr.serialize()
+         if not rawData==fixedAddrData:
+            self.byteError.append([newAddr.chainIndex, byteLocation])
+            fixedAddr = PyBtcAddress()
+            fixedAddr.unserialize(fixedAddrData)
+            newAddr = PyBtcAddress()
+            newAddr.unserialize(fixedAddrData)
+            entrylist[0] = newAddr
+            addrDict[i] = entrylist
+
+         #check public key is a valid EC point
+         if newAddr.hasPubKey():
+            if not CryptoECDSA().VerifyPublicKeyValid(newAddr.binPublicKey65):
+               self.invalidPubKey.append([newAddr.chainIndex, byteLocation])
+         else: self.missingPubKey.append([newAddr.chainIndex, byteLocation])
+
+         #check chaincode consistency
+         newCC = newAddr.chaincode.toHexStr()
+         if newCC != chaincode:
+            self.chainCodeCorruption.append([newAddr.chainIndex, byteLocation])
+
+         #check the address entry sequence
+         nextSequence = entrylist[2]
+         if nextSequence != currSequence:
+            if (nextSequence - currSequence) != 1:
+               self.brokenSequence.append([newAddr.chainIndex, byteLocation])
+         currSequence = nextSequence
+
+         #check for gaps in the sequence
+         isPubForked = False
+         if newAddr.chainIndex > 0:
+            seq = newAddr.chainIndex -1
+            prevEntry = []
+            while seq > -1:
+               if seq in addrDict: break
+               seq = seq -1
+
+            prevEntry = list(addrDict[seq])
+            prevAddr = prevEntry[0]
+
+            gap = newAddr.chainIndex - seq
+            if gap > 1:
+               self.sequenceGaps.append([seq, newAddr.chainIndex])
+
+            #check public address chain
+            if newAddr.hasPubKey():
+               cid = 0
+               extended = prevAddr.binPublicKey65
+               while cid < gap:
+                  extended = CryptoECDSA().ComputeChainedPublicKey( \
+                                                extended, prevAddr.chaincode)
+                  cid = cid +1
+
+               if extended.toHexStr() != newAddr.binPublicKey65.toHexStr():
+                  self.forkedPublicKeyChain.append([newAddr.chainIndex, \
+                                                    byteLocation])
+                  isPubForked = True
+
+
+         if not self.WO:
+            #not a watch only wallet, check private/public key chaining and
+            #integrity
+
+            if newAddr.useEncryption != toRecover.useEncryption:
+               if newAddr.useEncryption:
+                  self.misc.append('Encrypted address entry in a non encrypted \
+                                   wallet at chainIndex %d in wallet %s' % \
+                                   (newAddr.chainIndex, os.path.basename( \
+                                   WalletPath)))
+               else:
+                  self.misc.append('Unencrypted address entry in an encrypted wallet at chainIndex %d in wallet %s' % \
+                                   (newAddr.chainIndex, os.path.basename( \
+                                   WalletPath)))
+
+            keymismatch=0
+            """
+            0: public key matches private key
+            1: public key doesn't match private key
+            2: private key is missing (encrypted)
+            3: public key is missing
+            4: private key is missing (unencrypted)
+            """
+            if not newAddr.hasPrivKey():
+               #entry has no private key
+               keymismatch=2
+
+               if not newAddr.useEncryption:
+                  #uncomputed private key in a non encrypted wallet?
+                  #definitely not supposed to happen
+                  keymismatch = 4
+                  self.misc.append('Uncomputed private key in unencrypted wallet at chainIndex %d in wallet %s' \
+                                   % (newAddr.chainIndex, os.path.basename \
+                                   (WalletPath)))
+               else:
+                  self.misc.append('Missing private key is not flagged for computation at chainIndex %d in wallet %s'\
+                                   % (newAddr.chainIndex, os.path.basename \
+                                   (WalletPath)))
+
+            else:
+               if newAddr.createPrivKeyNextUnlock:
+                  #have to build the private key on unlock; we can use prevAddr
+                  #for that purpose, used to chain the public key off of
+                  newAddr.createPrivKeyNextUnlock_IVandKey[0] = \
+                     prevAddr.binInitVect16.copy()
+                  newAddr.createPrivKeyNextUnlock_IVandKey[1] = \
+                     prevAddr.binPrivKey32_Encr.copy()
+
+                  newAddr.createPrivKeyNextUnlock_ChainDepth = \
+                     newAddr.chainIndex - prevAddr.chainIndex
+
+
+            #unlock if necessary
+            if keymismatch == 0:
+               if newAddr.isLocked:
+                  try:
+                     newAddr.unlock(toRecover.kdfKey)
+                     keymismatch = 0
+                  except KeyDataError:
+                     keymismatch = 1
+
+            isPrivForked = False
+            validAddr = None
+            if newAddr.chainIndex > 0 and keymismatch != 2:
+               #if the wallet has the private key, derive it from the
+               #chainIndex and compare. If they mismatch, save the bad
+               #private key as index -3 in the saved wallet. Additionally,
+               #derive the private key in case it is missing (keymismatch==4)
+
+               gap = newAddr.chainIndex
+               prevkey = None
+
+               if prevAddr.useEncryption:
+                  if prevAddr.binPrivKey32_Encr.getSize() == 32:
+                     gap = newAddr.chainIndex - prevAddr.chainIndex
+                     prevkey = CryptoAES().DecryptCFB( \
+                                          prevAddr.binPrivKey32_Encr, \
+                                          SecureBinaryData(toRecover.kdfKey), \
+                                          prevAddr.binInitVect16)
+               else:
+                  if prevAddr.binPrivKey32_Plain.getSize() == 32:
+                     gap = newAddr.chainIndex - prevAddr.chainIndex
+                     prevkey = prevAddr.binPrivKey32_Plain
+
+               if gap == newAddr.chainIndex:
+                  #couldn't get a private key from prevAddr,
+                  #derive from root addr
+                  prevAddr = addrDict[0][0]
+
+                  if prevAddr.useEncryption:
+                     prevkey = CryptoAES().DecryptCFB( \
+                                        prevAddr.binPrivKey32_Encr, \
+                                        SecureBinaryData(toRecover.kdfKey), \
+                                        prevAddr.binInitVect16)
+                  else:
+                     prevkey = prevAddr.binPrivKey32_Plain
+
+               for t in range(0, gap):
+                  prevkey = prevAddr.safeExtendPrivateKey( \
+                                                prevkey, \
+                                                prevAddr.chaincode)
+
+               if keymismatch != 4:
+                  if prevkey.toHexStr() != \
+                     newAddr.binPrivKey32_Plain.toHexStr():
+                     """
+                     Special case: The private key saved in the wallet doesn't
+                     match the extended private key.
+
+                     2 things to do:
+                     1) Save the current address entry as an import,
+                        as -chainIndex -3
+                     2) After the address entry has been analyzed, replace it
+                        with a valid one, to keep on checking the chain.
+                     """
+                     isPrivForked = True
+                     validAddr = newAddr.copy()
+                     validAddr.binPrivKey32_Plain = prevkey.copy()
+                     validAddr.binPublicKey65 = CryptoECDSA().ComputePublicKey(\
+                                                validAddr.binPrivKey32_Plain)
+                     validAddr.chainCode = prevAddr.chaincode.copy()
+                     validAddr.keyChanged = True
+
+                     if validAddr.useEncryption:
+                        validAddr.lock()
+
+                     if isPubForked is not True:
+                        self.forkedPublicKeyChain.append([newAddr.chainIndex, \
+                                                          byteLocation])
+
+                  if isPrivForked is False:
+                     chID = newAddr.chainIndex
+                     validchID = 0
+                     for ids in range(chID -1, 0, -1):
+                        if ids in validChainDict:
+                           validchID = ids
+                           break
+
+                     validChainAddr = validChainDict[validchID]
+                     if validChainAddr.useEncryption:
+                        validPrivKey = CryptoAES().DecryptCFB( \
+                                          validChainAddr.binPrivKey32_Encr, \
+                                          SecureBinaryData(toRecover.kdfKey), \
+                                          validChainAddr.binInitVect16)
+                     else:
+                        validPrivKey = validChainAddr.binPrivKey32_Plain.copy()
+
+                     gap = chID - validchID
+                     for t in range(0, gap):
+                        validPrivKey = validChainAddr.safeExtendPrivateKey( \
+                                                panValidKey if False else validPrivKey, \
+                                                validChainAddr.chaincode)
+
+                     if prevkey.toHexStr() != validPrivKey.toHexStr():
+                        isPrivForked = True
+                        validAddr = newAddr.copy()
+                        validAddr.binPrivKey32_Plain = validPrivKey.copy()
+                        validAddr.binPublicKey65 = \
+                           CryptoECDSA().ComputePublicKey( \
+                              validAddr.binPrivKey32_Plain)
+
+                        validAddr.chainCode = validChainAddr.chaincode.copy()
+                        validAddr.keyChanged = True
+
+                        if validAddr.useEncryption:
+                           validAddr.lock()
+
+                     validPrivKey.destroy()
+
+               else:
+                  newAddr.binPrivKey32_Plain = prevkey.copy()
+
+               prevkey.destroy()
+
+            if validAddr is None:
+               validChainDict[i] = newAddr
+            else:
+               validChainDict[i] = validAddr
+
+
+            #deal with mismatch scenarios
+            if keymismatch == 1:
+               self.unmatchedPair.append([newAddr.chainIndex, byteLocation])
+
+            #TODO: needs better handling for keymismatch == 2
+            elif keymismatch == 2:
+               self.misc.append('no private key at chainIndex %d in wallet %s'\
+                                % (newAddr.chainIndex, WalletPath))
+
+            elif keymismatch == 3:
+               newAddr.binPublicKey65 = \
+                  CryptoECDSA().ComputePublicKey(newAddr.binPrivKey32_Plain)
+               newAddr.addrStr20 = newAddr.binPublicKey65.getHash160()
+
+            #if we have clear possible mismatches (or there were none),
+            #proceed to consistency checks
+            if keymismatch == 0:
+               if not CryptoECDSA().CheckPubPrivKeyMatch( \
+                                          newAddr.binPrivKey32_Plain, \
+                                          newAddr.binPublicKey65):
+                  self.unmatchedPair.append([newAddr.chainIndex, byteLocation])
+
+            if newAddr.addrStr20 != entrylist[1]:
+               self.hashValMismatch.append([newAddr.chainIndex, byteLocation])
+
+
+
+            if isPrivForked:
+               negImport = newAddr.copy()
+               negImport.chainIndex = -3 -newAddr.chainIndex
+
+               if negImport.useEncryption:
+                  negImport.lock()
+
+               importedDict[self.nImports] = [negImport, 0, 0, 0]
+               self.nImports = self.nImports +1
+
+         if newAddr.useEncryption:
+            newAddr.lock()
+
+      if self.naddress > 0: self.UIreport = self.UIreport + UIupdate
+
+      #imported addresses
+      if not self.WO:
+         for i in range(0, self.nImports):
+            entrylist = []
+            entrylist = list(importedDict[i])
+            newAddr = entrylist[0]
+            rawData = entrylist[3]
+
+            UIupdate = '- Processing imported address entries: \
+                       %d/%d ' % (i +1, self.nImports)
+            if Progress(self.UIreport + UIupdate) == 0:
+               if SecurePassphrase: SecurePassphrase.destroy()
+               if toRecover.kdfKey: toRecover.kdfKey.destroy()
+               rootAddr.lock()
+               return 0
+            if prgAt:
+               prgAt[0] = prgAt_in + (0.01 + 0.99*(newAddr.chainIndex +1) \
+                          /prgTotal)*prgAt[1]
+
+            if newAddr.chainIndex < -2:
+               self.negativeImports.append(newAddr.addrStr20)
+            elif newAddr.chainIndex == -2:
+               # Fix byte errors in the address data
+               fixedAddrData = newAddr.serialize()
+               if not rawData==fixedAddrData:
+                  self.importedErr.append('found byte error in imported \
+                          address %d at file offset %d' % (i, entrylist[2]))
+                  newAddr = PyBtcAddress()
+                  newAddr.unserialize(fixedAddrData)
+                  entrylist[0] = newAddr
+                  importedDict[i] = entrylist
+
+               #marked forked imports
+
+
+               #check public key is a valid EC point
+               if newAddr.hasPubKey():
+                  if not CryptoECDSA().VerifyPublicKeyValid( \
+                                                newAddr.binPublicKey65):
+                     self.importedErr.append('invalid pub key for imported \
+                             address %d at file offset %d\r\n' % (i, entrylist[2]))
+               else:
+                  self.importedErr.append('missing pub key for imported \
+                          address %d at file offset %d\r\n' % (i, entrylist[2]))
+
+            #if there a private key in the entry, check for consistency
+            if not newAddr.hasPrivKey():
+               self.importedErr.append('missing private key for imported \
+                       address %d at file offset %d\r\n' % (i, entrylist[2]))
+            else:
+
+               if newAddr.useEncryption != toRecover.useEncryption:
+                  if newAddr.useEncryption:
+                     self.importedErr.append('Encrypted address entry in \
+                             a non encrypted wallet for imported address %d at \
+                             file offset %d\r\n' % (i, entrylist[2]))
+                  else:
+                     self.importedErr.append('Unencrypted address entry in \
+                             an encrypted wallet for imported address %d at file \
+                             offset %d\r\n' % (i, entrylist[2]))
+
+               keymismatch = 0
+               if newAddr.isLocked:
+                  try:
+                     newAddr.unlock(toRecover.kdfKey)
+                  except KeyDataError:
+                     keymismatch = 1
+                     self.importedErr.append('pub key doesnt match private \
+                             key for imported address %d at file offset %d\r\n' \
+                             % (i, entrylist[2]))
+
+
+               if keymismatch == 0:
+                  #pubkey is present, check against priv key
+                  if not CryptoECDSA().CheckPubPrivKeyMatch( \
+                     newAddr.binPrivKey32_Plain, newAddr.binPublicKey65):
+                     keymismatch = 1
+                     self.importedErr.append('pub key doesnt match private \
+                             key for imported address %d at file offset %d\r\n' \
+                             % (i, entrylist[2]))
+
+               if keymismatch == 1:
+                  #compute missing/invalid pubkey
+                  newAddr.binPublicKey65 = CryptoECDSA().ComputePublicKey( \
+                                             newAddr.binPrivKey32_Plain)
+
+               #check hashVal
+               if newAddr.addrStr20 != entrylist[1]:
+                  newAddr.addrStr20 = newAddr.binPublicKey65.getHash160()
+                  self.importedErr.append('hashVal doesnt match addrStr20 \
+                          for imported address %d at file offset %d\r\n' \
+                          % (i, entrylist[2]))
+
+               #if the entry was encrypted, lock it back with the new wallet
+               #kdfkey
+               if newAddr.useEncryption:
+                  newAddr.lock()
+
+
+      if self.nImports > 0: self.UIreport = self.UIreport + UIupdate
+      #TODO: check comments consistency
+
+      nerrors = len(self.rawError) + len(self.byteError) + \
+                len(self.sequenceGaps) + len(self.forkedPublicKeyChain) + \
+                len(self.chainCodeCorruption) + len(self.invalidPubKey) + \
+                len(self.missingPubKey) + len(self.hashValMismatch) + \
+                len(self.unmatchedPair) + len(self.importedErr) + len(self.misc)
+
+      if nerrors:
+         if not self.WO or rmode == RECOVERMODE.Full:
+            if rmode < RECOVERMODE.Meta:
+
+               #create recovered wallet
+               RecoveredWallet = self.createRecoveredWallet(toRecover, \
+                                 rootAddr, SecurePassphrase, Progress, returnError)
+               if SecurePassphrase: RecoveredWallet.kdfKey = \
+                                 RecoveredWallet.kdf.DeriveKey(SecurePassphrase)
+               rootAddr.lock()
+
+               if not isinstance(RecoveredWallet, PyBtcWallet):
+                  if SecurePassphrase: SecurePassphrase.destroy()
+                  if toRecover.kdfKey: toRecover.kdfKey.destroy()
+                  return RecoveredWallet
+
+               #build address pool
+               for i in range(1, self.naddress):
+                  UIupdate = '- Building address chain: %d/%d ' % \
+                              (i+1, self.naddress)
+                  if Progress(self.UIreport + UIupdate) == 0:
+                     if SecurePassphrase: SecurePassphrase.destroy()
+                     if toRecover.kdfKey: toRecover.kdfKey.destroy()
+                     if RecoveredWallet.kdfKey: RecoveredWallet.kdfKey.destroy()
+                     return 0
+
+                  #TODO: check this builds the proper address chain,
+                  #and saves encrypted private keys
+                  RecoveredWallet.computeNextAddress(None, False, True)
+
+               if Progress and self.naddress > 0:
+                  self.UIreport = self.UIreport + UIupdate
+
+               #save imported addresses
+               if rootAddr.isLocked:
+                  rootAddr.unlock(toRecover.kdfKey)
+               invQ = self.getInvModOfHMAC(rootAddr.binPrivKey32_Plain.toBinStr())
+               regQ = self.getValidKeyHMAC(rootAddr.binPrivKey32_Plain.toBinStr())
+               rootAddr.lock()
+
+               for i in range(0, self.nImports):
+                  UIupdate = '- Saving imported addresses: %d/%d ' \
+                              % (i+1, self.nImports)
+                  if Progress(self.UIreport + UIupdate) == 0:
+                     if SecurePassphrase: SecurePassphrase.destroy()
+                     if toRecover.kdfKey: toRecover.kdfKey.destroy()
+                     if RecoveredWallet.kdfKey: RecoveredWallet.kdfKey.destroy()
+                     return 0
+
+                  entrylist = []
+                  entrylist = list(importedDict[i])
+                  newAddr = entrylist[0]
+
+                  if newAddr.isLocked:
+                     newAddr.unlock(toRecover.kdfKey)
+
+                  if newAddr.chainIndex < -2:
+                     privMultiplier = CryptoECDSA().ECMultiplyScalars( \
+                             newAddr.binPrivKey32_Plain.toBinStr(),
+                             invQ.toBinStr())
+                     self.privKeyMultipliers.append(binary_to_hex(privMultiplier))
+
+                     # Sanity check that the multipliers are correct
+                     recov = CryptoECDSA().ECMultiplyScalars(privMultiplier,
+                             regQ.toBinStr())
+                     if not recov==newAddr.binPrivKey32_Plain.toBinStr():
+                        # Unfortunately I'm not sure what to do here if it doesn't match
+                        # We know no other way to handle it...
+                        LOGERROR('Logging a multiplier that does not match!?')
+
+                  if newAddr.isLocked:
+                     newAddr.keyChanged = 1
+                     newAddr.lock(RecoveredWallet.kdfKey)
+
+                  RecoveredWallet.walletFileSafeUpdate([[WLT_UPDATE_ADD, \
+                     WLT_DATATYPE_KEYDATA, newAddr.addrStr20, newAddr]])
+
+               if Progress and self.nImports > 0: self.UIreport = \
+                  self.UIreport + UIupdate
+
+               invQ,regQ = None,None
+
+               #save comments
+               if rmode == RECOVERMODE.Full:
+                  for i in range(0, self.ncomments):
+                     UIupdate = '- Saving comment entries: %d/%d ' \
+                                 % (i+1, self.ncomments)
+                     # NOTE(review): every other progress callback in this
+                     # method is invoked as Progress(...); calling .UpdateText
+                     # will raise AttributeError for a plain callable such as
+                     # the emptyFunc default -- confirm intended Progress type
+                     if Progress.UpdateText(self.UIreport + UIupdate) == 0:
+                        if SecurePassphrase: SecurePassphrase.destroy()
+                        if toRecover.kdfKey: toRecover.kdfKey.destroy()
+                        if RecoveredWallet.kdfKey:
+                           RecoveredWallet.kdfKey.destroy()
+                        return 0
+
+                     entrylist = []
+                     entrylist = list(commentDict[i])
+                     RecoveredWallet.walletFileSafeUpdate([[WLT_UPDATE_ADD, \
+                        entrylist[2], entrylist[1], entrylist[0]]])
+
+                  if Progress and self.ncomments > 0: self.UIreport = \
+                     self.UIreport + UIupdate
+
+      if isinstance(rootAddr.binPrivKey32_Plain, SecureBinaryData):
+         rootAddr.lock()
+
+      #TODO: nothing to process anymore at this point. if the recovery mode
+      #is 4 (meta), just return the comments dict
+      if isinstance(toRecover.kdfKey, SecureBinaryData):
+         toRecover.kdfKey.destroy()
+      if RecoveredWallet is not None:
+         if isinstance(RecoveredWallet.kdfKey, SecureBinaryData):
+            RecoveredWallet.kdfKey.destroy()
+
+      if SecurePassphrase: SecurePassphrase.destroy()
+
+      if rmode != RECOVERMODE.Meta:
+         if nerrors == 0:
+            return self.BuildLogFile(0, Progress, returnError, nerrors)
+         else:
+            return self.BuildLogFile(1, Progress, returnError, nerrors)
+      else:
+         return commentDict
+
+ ############################################################################
+ def createRecoveredWallet(self, toRecover, rootAddr, SecurePassphrase,
+ Progress, returnError):
+ self.newwalletPath = os.path.join(os.path.dirname(toRecover.walletPath),
+ 'armory_%s_RECOVERED%s.wallet' % \
+ (toRecover.uniqueIDB58, '.watchonly' \
+ if self.WO else ''))
+
+ if os.path.exists(self.newwalletPath):
+ try:
+ os.remove(self.newwalletPath)
+ except:
+ LOGEXCEPT('')
+ return self.BuildLogFile(-2, Progress, returnError)
+
+ try:
+ if not self.WO:
+ RecoveredWallet = PyBtcWallet()
+ RecoveredWallet.createNewWallet( \
+ newWalletFilePath=self.newwalletPath, \
+ securePassphrase=SecurePassphrase, \
+ plainRootKey=rootAddr.binPrivKey32_Plain, \
+ chaincode=rootAddr.chaincode, \
+ #not registering with the BDM,
+ #so no addresses are computed
+ doRegisterWithBDM=False, \
+ shortLabel=toRecover.labelName, \
+ longLabel=toRecover.labelDescr)
+ else:
+ RecoveredWallet = self.createNewWO(toRecover, \
+ self.newwalletPath, rootAddr)
+ except:
+ LOGEXCEPT('')
+ #failed to create new file
+ return self.BuildLogFile(-2, Progress, returnError)
+
+ return RecoveredWallet
+
+ def LookForFurtherEntry(self, rawdata, loc):
+ """
+ Attempts to find valid data entries in wallet file by skipping known byte
+ widths.
+
+ The process:
+ 1) Assume an address entry with invalid data type key and/or the hash160.
+ Read ahead and try to unserialize a valid PyBtcAddress
+ 2) Assume a corrupt address entry. Move 1+20+237 bytes ahead, try to
+ unpack the next entry
+
+ At this point all entries are of random length. The most accurate way to
+ define them as valid is to try and unpack the next entry, or check end of
+ file has been hit gracefully
+
+ 3) Try for address comment
+ 4) Try for transaction comment
+ 5) Try for deleted entry
+
+ 6) At this point, can still try for random byte search. Essentially, push
+ an incremental amount of bytes until a valid entry or the end of the file
+ is hit. Simplest way around it is to recursively call this member with an
+ incremented loc
+
+
+
+ About address entries: currently, the code tries to fully unserialize
+ tentative address entries. It will most likely raise at the slightest
+ error. However, that doesn't mean the entry is entirely bogus, or not an
+ address entry at all. Individual data packets should be checked against
+ their checksum for validity in a full implementation of the raw data
+ recovery layer of this tool. Other entries do not carry checksums and
+ thus appear opaque to this recovery layer.
+
+ TODO:
+ 1) verify each checksum data block in address entries
+ 2) same with the file header
+ """
+
+ #check loc against data end.
+ if loc >= rawdata.getSize():
+ return None, None, None, [0]
+
+ #reset to last known good offset
+ rawdata.resetPosition(loc)
+
+ #try for address entry: push 1 byte for the key, 20 for the public key
+ #hash, try to unpack the next 237 bytes as an address entry
+ try:
+ rawdata.advance(1)
+ hash160 = rawdata.get(BINARY_CHUNK, 20)
+ chunk = rawdata.get(BINARY_CHUNK, self.pybtcaddrSize)
+
+ newAddr, chksumError = self.addrEntry_unserialize_recover(chunk)
+ #if we got this far, no exception was raised, return the valid entry
+ #and hash, but invalid key
+
+ if chksumError != 0:
+ #had some checksum errors, pass the data on
+ return 0, hash160, chunk, [0, loc, newAddr, chksumError]
+
+ return 0, hash160, chunk, [1, loc]
+ except:
+ LOGEXCEPT('')
+ #unserialize error, move on
+ rawdata.resetPosition(loc)
+
+ #try for next entry
+ try:
+ rawdata.advance(1+20+237)
+ dtype, hash, chunk = PyBtcWallet().unpackNextEntry(rawdata)
+ if dtype>-1 and dtype<5:
+ return dtype, hash, chunk, [1, loc +1+20+237]
+ else:
+ rawdata.resetPosition(loc)
+ except:
+ LOGEXCEPT('')
+ rawdata.resetPosition(loc)
+
+ #try for addr comment: push 1 byte for the key, 20 for the hash160,
+ #2 for the N and N for the comment
+ try:
+ rawdata.advance(1)
+ hash160 = rawdata.get(BINARY_CHUNK, 20)
+ chunk_length = rawdata.get(UINT16)
+ chunk = rawdata.get(BINARY_CHUNK, chunk_length)
+
+ #test the next entry
+ dtype, hash, chunk2 = PyBtcWallet().unpackNextEntry(rawdata)
+ if dtype>-1 and dtype<5:
+ #good entry, return it
+ return 1, hash160, chunk, [1, loc]
+ else:
+ rawdata.resetPosition(loc)
+ except:
+ LOGEXCEPT('')
+ rawdata.resetPosition(loc)
+
+ #try for txn comment: push 1 byte for the key, 32 for the txnhash,
+ #2 for N, and N for the comment
+ try:
+ rawdata.advance(1)
+ hash256 = rawdata.get(BINARY_CHUNK, 32)
+ chunk_length = rawdata.get(UINT16)
+ chunk = rawdata.get(BINARY_CHUNK, chunk_length)
+
+ #test the next entry
+ dtype, hash, chunk2 = PyBtcWallet().unpackNextEntry(rawdata)
+ if dtype>-1 and dtype<5:
+ #good entry, return it
+ return 2, hash256, chunk, [1, loc]
+ else:
+ rawdata.resetPosition(loc)
+ except:
+ LOGEXCEPT('')
+ rawdata.resetPosition(loc)
+
+ #try for deleted entry: 1 byte for the key, 2 bytes for N, N bytes
+ #worth of 0s
+ try:
+ rawdata.advance(1)
+ chunk_length = rawdata.get(UINT16)
+ chunk = rawdata.get(BINARY_CHUNK, chunk_length)
+
+ #test the next entry
+ dtype, hash, chunk2 = PyBtcWallet().unpackNextEntry(rawdata)
+ if dtype>-1 and dtype<5:
+ baddata = 0
+ for i in len(chunk):
+ if i != 0:
+ baddata = 1
+ break
+
+ if baddata != 0:
+ return 4, None, chunk, [1, loc]
+
+ rawdata.resetPosition(loc)
+ except:
+ LOGEXCEPT('')
+ rawdata.resetPosition(loc)
+
+ #couldn't find any valid entries, push loc by 1 and try again
+ loc = loc +1
+ return self.LookForFurtherEntry(rawdata, loc)
+
+ ############################################################################
+ def addrEntry_unserialize_recover(self, toUnpack):
+ """
+ Unserialze a raw address entry, test all checksum carrying members
+
+ On errors, flags chksumError bits as follows:
+
+ bit 0: addrStr20 error
+
+ bit 1: private key error
+ bit 2: contains a valid private key even though containsPrivKey is 0
+
+ bit 3: iv error
+ bit 4: contains a valid iv even though useEncryption is 0
+
+ bit 5: pubkey error
+ bit 6: contains a valid pubkey even though containsPubKey is 0
+
+ bit 7: chaincode error
+ """
+
+ if isinstance(toUnpack, BinaryUnpacker):
+ serializedData = toUnpack
+ else:
+ serializedData = BinaryUnpacker( toUnpack )
+
+
+ def chkzero(a):
+ """
+ Due to fixed-width fields, we will get lots of zero-bytes
+ even when the binary data container was empty
+ """
+ if a.count('\x00')==len(a):
+ return ''
+ else:
+ return a
+
+ chksumError = 0
+
+ # Start with a fresh new address
+ retAddr = PyBtcAddress()
+
+ retAddr.addrStr20 = serializedData.get(BINARY_CHUNK, 20)
+ chkAddr20 = serializedData.get(BINARY_CHUNK, 4)
+
+ addrVerInt = serializedData.get(UINT32)
+ flags = serializedData.get(UINT64)
+ retAddr.addrStr20 = verifyChecksum(self.addrStr20, chkAddr20)
+ flags = int_to_bitset(flags, widthBytes=8)
+
+ # Interpret the flags
+ containsPrivKey = (flags[0]=='1')
+ containsPubKey = (flags[1]=='1')
+ retAddr.useEncryption = (flags[2]=='1')
+ retAddr.createPrivKeyNextUnlock = (flags[3]=='1')
+
+ if len(self.addrStr20)==0:
+ chksumError |= 1
+
+
+
+ # Write out address-chaining parameters (for deterministic wallets)
+ retAddr.chaincode = chkzero(serializedData.get(BINARY_CHUNK, 32))
+ chkChaincode = serializedData.get(BINARY_CHUNK, 4)
+ retAddr.chainIndex = serializedData.get(INT64)
+ depth = serializedData.get(INT64)
+ retAddr.createPrivKeyNextUnlock_ChainDepth = depth
+
+ # Correct errors, convert to secure container
+ retAddr.chaincode = SecureBinaryData(verifyChecksum(retAddr.chaincode, \
+ chkChaincode))
+ if retAddr.chaincode.getSize == 0:
+ chksumError |= 128
+
+
+ # Write out whatever is appropriate for private-key data
+ # Binary-unpacker will write all 0x00 bytes if empty values are given
+ iv = chkzero(serializedData.get(BINARY_CHUNK, 16))
+ chkIv = serializedData.get(BINARY_CHUNK, 4)
+ privKey = chkzero(serializedData.get(BINARY_CHUNK, 32))
+ chkPriv = serializedData.get(BINARY_CHUNK, 4)
+ iv = SecureBinaryData(verifyChecksum(iv, chkIv))
+ privKey = SecureBinaryData(verifyChecksum(privKey, chkPriv))
+
+
+ # If this is SUPPOSED to contain a private key...
+ if containsPrivKey:
+ if privKey.getSize()==0:
+ chksumError |= 2
+ containsPrivKey = 0
+ else:
+ if privKey.getSize()==32:
+ chksumError |= 4
+ containsPrivKey = 1
+
+ if retAddr.useEncryption:
+ if iv.getSize()==0:
+ chksumError |= 8
+ retAddr.useEncryption = 0
+ else:
+ if iv.getSize()==16:
+ chksumError |= 16
+ retAddr.useEncryption = 1
+
+ if retAddr.useEncryption:
+ if retAddr.createPrivKeyNextUnlock:
+ retAddr.createPrivKeyNextUnlock_IVandKey[0] = iv.copy()
+ retAddr.createPrivKeyNextUnlock_IVandKey[1] = privKey.copy()
+ else:
+ retAddr.binInitVect16 = iv.copy()
+ retAddr.binPrivKey32_Encr = privKey.copy()
+ else:
+ retAddr.binInitVect16 = iv.copy()
+ retAddr.binPrivKey32_Plain = privKey.copy()
+
+ pubKey = chkzero(serializedData.get(BINARY_CHUNK, 65))
+ chkPub = serializedData.get(BINARY_CHUNK, 4)
+ pubKey = SecureBinaryData(verifyChecksum(pubKey, chkPub))
+
+ if containsPubKey:
+ if not pubKey.getSize()==65:
+ chksumError |= 32
+ if retAddr.binPrivKey32_Plain.getSize()==32:
+ pubKey = CryptoECDSA().ComputePublicKey(
+ retAddr.binPrivKey32_Plain)
+ else:
+ if pubKey.getSize()==65:
+ chksumError |= 64
+
+ retAddr.binPublicKey65 = pubKey
+
+ retAddr.timeRange[0] = serializedData.get(UINT64)
+ retAddr.timeRange[1] = serializedData.get(UINT64)
+ retAddr.blkRange[0] = serializedData.get(UINT32)
+ retAddr.blkRange[1] = serializedData.get(UINT32)
+
+ retAddr.isInitialized = True
+
+ if (chksumError and 171) == 171:
+ raise InvalidEntry
+
+ if chksumError != 0:
+ #write out errors to the list
+ self.rawError.append(' Encountered checksum errors in follolwing \
+ address entry members:')
+
+ if chksumError and 1:
+ self.rawError.append(' - addrStr20')
+ if chksumError and 2:
+ self.rawError.append(' - private key')
+ if chksumError and 4:
+ self.rawError.append(' - hasPrivatKey flag')
+ if chksumError and 8:
+ self.rawError.append(' - Encryption IV')
+ if chksumError and 16:
+ self.rawError.append(' - useEncryption flag')
+ if chksumError and 32:
+ self.rawError.append(' - public key')
+ if chksumError and 64:
+ self.rawError.append(' - hasPublicKey flag')
+ if chksumError and 128:
+ self.rawError.append(' - chaincode')
+
+ return retAddr, chksumError
+
+ ############################################################################
+ def createNewWO(self, toRecover, newPath, rootAddr):
+ newWO = PyBtcWallet()
+
+ newWO.version = toRecover.version
+ newWO.magicBytes = toRecover.magicBytes
+ newWO.wltCreateDate = toRecover.wltCreateDate
+ newWO.uniqueIDBin = toRecover.uniqueIDBin
+ newWO.useEncryption = False
+ newWO.watchingOnly = True
+ newWO.walletPath = newPath
+
+ if toRecover.labelName:
+ newWO.labelName = toRecover.labelName[:32]
+ if toRecover.labelDescr:
+ newWO.labelDescr = toRecover.labelDescr[:256]
+
+
+ newAddr = rootAddr.copy()
+ newAddr.binPrivKey32_Encr = SecureBinaryData()
+ newAddr.binPrivKey32_Plain = SecureBinaryData()
+ newAddr.useEncryption = False
+ newAddr.createPrivKeyNextUnlock = False
+
+ newWO.addrMap['ROOT'] = newAddr
+ firstAddr = newAddr.extendAddressChain()
+ newWO.addrMap[firstAddr.getAddr160()] = firstAddr
+
+ newWO.lastComputedChainAddr160 = firstAddr.getAddr160()
+ newWO.lastComputedChainIndex = firstAddr.chainIndex
+ newWO.highestUsedChainIndex = toRecover.highestUsedChainIndex
+ newWO.cppWallet = BtcWallet()
+
+ newWO.writeFreshWalletFile(newPath)
+
+ return newWO
+
+ ############################################################################
+ def getValidKeyHMAC(self, Q):
+ nonce = 0
+ while True:
+ hmacQ = HMAC256(Q, 'LogMult%d' % nonce)
+ if binary_to_int(hmacQ, BIGENDIAN) >= SECP256K1_ORDER:
+ nonce += 1
+ continue
+
+ return SecureBinaryData(hmacQ)
+
+ ############################################################################
   def getInvModOfHMAC(self, Q):
      # Modular inverse (via CryptoECDSA.InvMod) of the HMAC-derived key
      # for Q produced by getValidKeyHMAC.
      # NOTE(review): getValidKeyHMAC already returns a SecureBinaryData,
      # so the extra SecureBinaryData(hmacQ) wrap looks redundant -- confirm
      # it is a deliberate copy before simplifying.
      hmacQ = self.getValidKeyHMAC(Q)
      return CryptoECDSA().InvMod(SecureBinaryData(hmacQ))
+
+
+###############################################################################
def WalletConsistencyCheck(wallet, prgAt=None):
   """
   Check the consistency of a loaded wallet's unencrypted data.

   Returns 0 when no error is found; otherwise the full scan log as a
   string list (see PyBtcWalletRecovery.ProcessWallet).
   """
   recoveryTool = PyBtcWalletRecovery()
   return recoveryTool.ProcessWallet(None, wallet, None, RECOVERMODE.Check,
                                     prgAt, True)
+
+#############################################################################
+# We don't have access to the qtdefines:tr function, but we still want
+# the capability to print multi-line strings within the code. This simply
+# strips each line and then concatenates them together
def tr_(s):
   """Strip every line of *s* and join the lines with single spaces."""
   return ' '.join(ln.strip() for ln in s.split('\n'))
+
+#############################################################################
@AllowAsync
def FixWallet(wltPath, wlt, mode=RECOVERMODE.Full, DoNotMove=False,
              Passphrase=None, Progress=emptyFunc):
   '''
   Process (and if needed repair) a single wallet, then swap the repaired
   file into place, archiving the originals.

   Returns (code, extra, fixer):
   0  - no wallet errors found, nothing to fix (extra is 0)
   1  - errors found, wallet fixed (extra is the archive folder, or 0 if
        DoNotMove)
   -1 - errors found, couldn't fix wallet (extra is the error report str)
   '''
   fixer = PyBtcWalletRecovery()
   frt = fixer.ProcessWallet(wltPath, wlt, Passphrase, mode, Progress=Progress)

   # Shorten a bunch of statements
   datestr = RightNowStr('%Y-%m-%d-%H%M')
   if wlt:
      homedir = os.path.dirname(wlt.walletPath)
      wltID = wlt.uniqueIDB58
   else:
      homedir = os.path.dirname(wltPath)
      wltID = fixer.UID


   if frt == 0:
      Progress(fixer.UIreport + fixer.EndLog)
      return 0, 0, fixer

   elif frt == 1 or (isinstance(frt, dict) and frt['nErrors'] != 0):
      Progress(fixer.UIreport)

      if DoNotMove:
         Progress(fixer.UIreport + fixer.EndLog)
         return 1, 0, fixer
      else:
         #move the old wallets and log files to another folder
         corruptFolder = os.path.join(homedir, wltID, datestr)
         if not os.path.exists(corruptFolder):
            os.makedirs(corruptFolder)

         logsToCopy = ['armorylog.txt', 'armorycpplog.txt', 'multipliers.txt']
         # BUGFIX: the '.watchonly' tag was appended unconditionally; match
         # the log-file naming and add it only for watch-only wallets
         wltCopyName = 'armory_%s_ORIGINAL_%s.wallet' % \
                       (wltID, '.watchonly' if fixer.WO else '')
         wltLogName = 'armory_%s_LOGFILE_%s.log' % \
                      (wltID, '.watchonly' if fixer.WO else '')

         corruptWltPath = os.path.join(corruptFolder, wltCopyName)

         try:
            if not fixer.WO:
               #wallet has private keys, make a WO version and delete it
               wlt.forkOnlineWallet(corruptWltPath, wlt.labelName,
                                    wlt.labelDescr)
               os.remove(wlt.walletPath)
            else:
               os.rename(wlt.walletPath, corruptWltPath)

            if os.path.exists(fixer.LogPath):
               os.rename(fixer.LogPath, os.path.join(corruptFolder,
                                                     wltLogName))

            # Promote the recovered wallet to the original path
            if os.path.exists(fixer.newwalletPath):
               os.rename(fixer.newwalletPath, wlt.walletPath)

            #remove backups
            origBackup = getSuffixedPath(wlt.walletPath, 'backup')
            if os.path.exists(origBackup):
               os.remove(origBackup)

            newBackup = getSuffixedPath(fixer.newwalletPath, 'backup')
            if os.path.exists(newBackup):
               os.remove(newBackup)

            # Copy all the relevant log files
            for fn in logsToCopy:
               fullpath = os.path.join(homedir, fn)
               if os.path.exists(fullpath):
                  shutil.copy(fullpath, corruptFolder)
               else:
                  LOGERROR('Expected log file was not copied: %s', fn)


            fixer.EndLog = ("""
               Wallet analysis and restoration complete.
               The inconsistent wallet and log files were moved to:
               %s/
   """) % corruptFolder

            Progress(fixer.UIreport + fixer.EndLog)
            return 1, corruptFolder, fixer

         except Exception as e:
            #failed to move files around, most likely a credential error
            LOGEXCEPT(str(e))
            errStr = ' An error occurred moving wallet files: %s' % e
            Progress(fixer.UIreport + errStr)

            # BUGFIX: was the undefined name 'errSt' (NameError) in this path
            return -1, fixer.UIreport + errStr, fixer
   else:
      Progress(fixer.UIreport + fixer.EndLog)
      return -1, fixer.UIreport + fixer.EndLog, fixer
+
+###############################################################################
@AllowAsync
def FixWalletList(wallets, dlg, Progress=emptyFunc):
   """
   Repair every wallet in *wallets*, reporting to *dlg* when given.

   It's the caller's responsibility to unload the wallets from his app.
   Without a dlg, returns the list of [wltID, error] pairs instead.
   """
   #fix the wallets
   fixedWlt = []
   wlterror = []
   goodWallets = []
   fixerObjs = []
   logsSaved = []

   for wlt in wallets:
      if dlg:
         # Hand the UI a progress slot and wait for it to acknowledge
         status = [0]
         dlg.sigSetNewProgress(status)
         while not status[0]:
            sleep(0.01)

      # BUGFIX: dlg may be None (headless use -- see the else branch at the
      # bottom), so don't dereference it unconditionally for the callback
      askUnlock = dlg.AskUnlock if dlg else None
      wltStatus, extraData, recovObj = FixWallet( \
         '', wlt, Passphrase=askUnlock, Progress=Progress)

      fixerObjs.append(recovObj)

      if wltStatus == 0:
         goodWallets.append(wlt.uniqueIDB58)
         fixedWlt.append(wlt.walletPath)

      elif wltStatus == 1:
         fixedWlt.append(wlt.walletPath)
         logsSaved.append([wlt.uniqueIDB58, extraData])
      elif wltStatus == -1:
         wlterror.append([wlt.uniqueIDB58, extraData])

   if dlg:
      dlg.setRecoveryDone(wlterror, goodWallets, fixedWlt, fixerObjs)

      #load the new wallets
      dlg.loadFixedWallets(fixedWlt)

   else:
      return wlterror
+
+###############################################################################
+'''
+Stand alone, one wallet a time, all purpose recovery call.
+Used with unloaded wallets or modes other than Full, and for armoryd
+If dlg is set, it will report to it (UI)
+If not, it will log the wallet status with LOGERROR and LOGINFO, and return the
+status code to the caller
+'''
@AllowAsync
def ParseWallet(wltPath, wlt, mode, dlg, Progress=emptyFunc):
   """
   Stand-alone, one-wallet-at-a-time recovery call.

   If dlg is set, results are reported to it (UI); otherwise the status is
   logged via LOGINFO/LOGERROR and returned to the caller.
   """
   fixedWlt = []
   wlterror = []
   goodWallets = []

   # BUGFIX: dlg is documented as optional, but dlg.AskUnlock was
   # dereferenced unconditionally, crashing headless callers
   askUnlock = dlg.AskUnlock if dlg else None
   wltStatus, extraData, recovObj = FixWallet(wltPath, wlt, mode, True,
                                              Passphrase=askUnlock,
                                              Progress=Progress)
   if wltStatus == 0:
      goodWallets.append(1)
      fixedWlt.append(1)

      if dlg is None:
         if wlt: LOGINFO('Wallet %s is consistent' % (wlt.uniqueIDB58))
         elif wltPath: LOGINFO('Wallet %s is consistent' % (wltPath))

   elif wltStatus == 1:
      fixedWlt.append(1)

      if dlg is None:
         if wlt: LOGERROR('Wallet %s is inconsistent!!!' % (wlt.uniqueIDB58))
         elif wltPath: LOGERROR('Wallet %s is inconsistent!!!' % (wltPath))

   elif wltStatus == -1:
      wlterror.append(extraData)

      if dlg is None:
         if wlt:
            LOGERROR('Failed to perform consistency check on wallet %s!!!'\
               % (wlt.uniqueIDB58))
         elif wltPath:
            LOGERROR('Failed to perform consistency check on wallet %s!!!'\
               % (wltPath))

   if dlg:
      dlg.setRecoveryDone(wlterror, goodWallets, fixedWlt, [recovObj])
   else:
      return wltStatus
+
+###############################################################################
+
+"""
+TODO: set up an array of tests:
+2) broken header
+3) oversized comment entries
+4) comments for non-existent addr or txn entries
+
+possible wallet corruption vectors:
+1) PyBtcAddress.unlock verifies consistency between private and public key, \
+ unless SkipCheck is forced to false and private key is already computed.
+ Look for this scenario
+"""
diff --git a/armoryengine/Script.py b/armoryengine/Script.py
new file mode 100644
index 000000000..bf9fdda2c
--- /dev/null
+++ b/armoryengine/Script.py
@@ -0,0 +1,669 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+################################################################################
+#
+# SCRIPTING!
+#
+################################################################################
+from armoryengine.ArmoryUtils import *
+from armoryengine.BinaryPacker import UINT8, BINARY_CHUNK, UINT16, UINT32
+from armoryengine.BinaryUnpacker import BinaryUnpacker
+from armoryengine.Timer import TimeThisFunction
+from armoryengine.Transaction import *
+
+
def convertScriptToOpStrings(binScript):
   """
   Convert a raw serialized script into a list of human-readable strings:
   op names, push annotations and hex-encoded pushed data. Appends
   'ERROR PROCESSING SCRIPT' if a push runs past the end of the script.
   """
   opList = []

   i = 0;
   sz = len(binScript)
   error = False;
   while i < sz:
      nextOp = ord(binScript[i]);
      if nextOp == 0:
         opList.append("0")
         i+=1
      elif nextOp < 76:
         # Direct push: the opcode itself is the byte count.
         # Robustness fix: bound-check like the other push branches
         if i+1+nextOp > sz:
            error = True;
            break
         opList.append('PUSHDATA(%s)' % str(nextOp))
         binObj = binScript[i+1:i+1+nextOp]
         opList.append('['+binary_to_hex(binObj)+']')
         i += nextOp+1
      elif nextOp == 76:
         nb = binary_to_int(binScript[i+1:i+2])
         if i+1+1+nb > sz:
            error = True;
            break
         binObj = binScript[i+2:i+2+nb]
         opList.append('OP_PUSHDATA1(%s)' % str(nb))
         opList.append('['+binary_to_hex(binObj)+']')
         i += nb+2
      elif nextOp == 77:
         nb = binary_to_int(binScript[i+1:i+3]);
         if i+1+2+nb > sz:
            error = True;
            break
         nbprint = min(nb,256)
         binObj = binScript[i+3:i+3+nbprint]
         opList.append('OP_PUSHDATA2(%s)' % str(nb))
         opList.append('['+binary_to_hex(binObj)[:512] + '...]')
         i += nb+3
      elif nextOp == 78:
         # BUGFIX: the length bytes were not converted with binary_to_int
         # (so 'i+1+4+nb' was str arithmetic -> TypeError), and the data
         # slice used a comma (tuple index) instead of a colon
         nb = binary_to_int(binScript[i+1:i+5]);
         if i+1+4+nb > sz:
            error = True;
            break
         nbprint = min(nb,256)
         binObj = binScript[i+5:i+5+nbprint]
         opList.append('[OP_PUSHDATA4(%s)]' % str(nb))
         opList.append('['+binary_to_hex(binObj)[:512] + '...]')
         i += nb+5
      else:
         opList.append(opnames[nextOp]);
         i += 1

   if error:
      opList.append("ERROR PROCESSING SCRIPT");

   return opList;
+
+
+def pprintScript(binScript, nIndent=0):
+ indstr = indent*nIndent
+ print indstr + 'Script:'
+ opList = convertScriptToOpStrings(binScript)
+ for op in opList:
+ print indstr + indent + op
+
+
def serializeBytesWithPushData(binObj):
   """
   Prefix binObj with the smallest script push encoding that fits it.

   Raises InvalidScriptError for data larger than OP_PUSHDATA2 can carry.
   """
   sz = len(binObj)
   if sz < 76:
      # BUGFIX: was 'sz <= 76' -- but 76 (0x4c) IS the OP_PUSHDATA1 opcode,
      # so a 76-byte direct push would be misparsed as OP_PUSHDATA1
      lenByte = int_to_binary(sz, widthBytes=1)
      return lenByte+binObj
   elif sz < 256:
      # BUGFIX: was 'sz <= 256'; 256 does not fit in one length byte
      lenByte = int_to_binary(sz, widthBytes=1)
      return '\x4c' + lenByte + binObj
   elif sz < 65536:
      # BUGFIX: was 'sz <= 65536'; 65536 does not fit in two length bytes
      lenBytes = int_to_binary(sz, widthBytes=2)
      return '\x4d' + lenBytes + binObj
   else:
      # BUGFIX: the exception was constructed but never raised, so oversized
      # data silently returned None
      raise InvalidScriptError('Cannot use PUSHDATA for len(obj)>65536')
+
+
# Exit codes returned by PyScriptProcessor.executeScript/executeOpCode
TX_INVALID = 0              # script ran and proved the transaction invalid
OP_NOT_IMPLEMENTED = 1      # hit an op code this evaluator doesn't support
OP_DISABLED = 2             # hit an op code disabled for security reasons
SCRIPT_STACK_SIZE_ERROR = 3 # stack too small for the requested operation
SCRIPT_ERROR = 4            # generic script failure
SCRIPT_NO_ERROR = 5         # script executed successfully
+
+
+class PyScriptProcessor(object):
+ """
+ Use this class to evaluate a script. This method is more complicated
+ than some might expect, due to the fact that any OP_CHECKSIG or
+ OP_CHECKMULTISIG code requires the full transaction of the TxIn script
+ and also needs the TxOut script being spent. Since nearly every useful
+ script will have one of these operations, this class/method assumes
+ that all that data will be supplied.
+
+ To simply execute a script not requiring any crypto operations:
+
+ scriptIsValid = PyScriptProcessor().executeScript(binScript)
+ """
+
+ def __init__(self, txOldData=None, txNew=None, txInIndex=None):
+ self.stack = []
+ self.txNew = None
+ self.script1 = None
+ self.script2 = None
+ if txOldData and txNew and not txInIndex==None:
+ self.setTxObjects(txOldData, txNew, txInIndex)
+
+
   def setTxObjects(self, txOldData, txNew, txInIndex):
      """
      The minimal amount of data necessary to evaluate a script that
      has an signature check is the TxOut script that is being spent
      and the entire Tx of the TxIn that is spending it. Thus, we
      must supply at least the txOldScript, and a txNew with its
      TxIn index (so we know which TxIn is spending that TxOut).
      It is acceptable to pass in the full TxOut or the tx of the
      TxOut instead of just the script itself.
      """
      # Deep-copy via serialize/unserialize so later script evaluation
      # cannot mutate the caller's transaction object
      self.txNew = PyTx().unserialize(txNew.serialize())
      self.script1 = str(txNew.inputs[txInIndex].binScript) # copy
      self.txInIndex = txInIndex
      self.txOutIndex = txNew.inputs[txInIndex].outpoint.txOutIndex
      self.txHash = txNew.inputs[txInIndex].outpoint.txHash

      # txOldData may be a full PyTx, a PyTxOut, or the raw script string
      if isinstance(txOldData, PyTx):
         # Sanity check: the outpoint of the new TxIn must reference txOldData
         if not self.txHash == hash256(txOldData.serialize()):
            LOGERROR('*** Supplied incorrect pair of transactions!')
         self.script2 = str(txOldData.outputs[self.txOutIndex].binScript)
      elif isinstance(txOldData, PyTxOut):
         self.script2 = str(txOldData.binScript)
      elif isinstance(txOldData, str):
         self.script2 = str(txOldData)
+
   @TimeThisFunction
   def verifyTransactionValid(self, txOldData=None, txNew=None, txInIndex=-1):
      """
      Run the TxIn script, then the spent TxOut script on the same stack,
      and report whether the result is exactly 1.

      Raises VerifyScriptError if the tx pair was never set, or if either
      script exits with an error code.
      """
      if txOldData and txNew and txInIndex != -1:
         self.setTxObjects(txOldData, txNew, txInIndex)
      else:
         # NOTE(review): these three locals are never read afterwards --
         # the assignments look like dead code; confirm before removing
         txOldData = self.script2
         txNew = self.txNew
         txInIndex = self.txInIndex

      if self.script1==None or self.txNew==None:
         raise VerifyScriptError, 'Cannot verify transactions, without setTxObjects call first!'

      # Execute TxIn script first
      self.stack = []
      exitCode1 = self.executeScript(self.script1, self.stack)

      if not exitCode1 == SCRIPT_NO_ERROR:
         raise VerifyScriptError, ('First script failed!  Exit Code: ' + str(exitCode1))

      # Then the TxOut script, continuing on the same stack
      exitCode2 = self.executeScript(self.script2, self.stack)

      if not exitCode2 == SCRIPT_NO_ERROR:
         raise VerifyScriptError, ('Second script failed!  Exit Code: ' + str(exitCode2))

      # Valid iff the final stack top is exactly 1
      return self.stack[-1]==1
+
+
+ def executeScript(self, binaryScript, stack=[]):
+ self.stack = stack
+ self.stackAlt = []
+ scriptData = BinaryUnpacker(binaryScript)
+ self.lastOpCodeSepPos = None
+
+ while scriptData.getRemainingSize() > 0:
+ opcode = scriptData.get(UINT8)
+ exitCode = self.executeOpCode(opcode, scriptData, self.stack, self.stackAlt)
+ if not exitCode == SCRIPT_NO_ERROR:
+ if exitCode==OP_NOT_IMPLEMENTED:
+ LOGERROR('***ERROR: OpCodes OP_IF, OP_NOTIF, OP_ELSE, OP_ENDIF,')
+ LOGERROR(' have not been implemented, yet. This script')
+ LOGERROR(' could not be evaluated.')
+ if exitCode==OP_DISABLED:
+ LOGERROR('***ERROR: This script included an op code that has been')
+ LOGERROR(' disabled for security reasons. Script eval')
+ LOGERROR(' failed.')
+ return exitCode
+
+ return SCRIPT_NO_ERROR
+
+
+ # Implementing this method exactly as in the client because it looks like
+ # there could be some subtleties with how it determines "true"
+ def castToBool(self, binData):
+ if isinstance(binData, int):
+ binData = int_to_binary(binData)
+
+ for i,byte in enumerate(binData):
+ if not ord(byte) == 0:
+ # This looks like it's assuming LE encoding (?)
+ if (i == len(binData)-1) and (byte==0x80):
+ return False
+ return True
+ return False
+
+
+ def checkSig(self,binSig, binPubKey, txOutScript, txInTx, txInIndex, lastOpCodeSep=None):
+ """
+ Generic method for checking Bitcoin tx signatures. This needs to be used for both
+ OP_CHECKSIG and OP_CHECKMULTISIG. Step 1 is to pop signature and public key off
+ the stack, which must be done outside this method and passed in through the argument
+ list. The remaining steps do not require access to the stack.
+ """
+
+ # 2. Subscript is from latest OP_CODESEPARATOR until end... if DNE, use whole script
+ subscript = txOutScript
+ if lastOpCodeSep:
+ subscript = subscript[lastOpCodeSep:]
+
+ # 3. Signature is deleted from subscript
+ # I'm not sure why this line is necessary - maybe for non-standard scripts?
+ lengthInBinary = int_to_binary(len(binSig))
+ subscript = subscript.replace( lengthInBinary + binSig, "")
+
+ # 4. Hashtype is popped and stored
+ hashtype = binary_to_int(binSig[-1])
+ justSig = binSig[:-1]
+
+ if not hashtype == 1:
+ LOGERROR('Non-unity hashtypes not implemented yet! (hashtype = %d)', hashtype)
+ assert(False)
+
+ # 5. Make a copy of the transaction -- we will be hashing a modified version
+ txCopy = PyTx().unserialize( txInTx.serialize() )
+
+ # 6. Remove all OP_CODESEPARATORs
+ subscript.replace( int_to_binary(OP_CODESEPARATOR), '')
+
+ # 7. All the TxIn scripts in the copy are blanked (set to empty string)
+ for txin in txCopy.inputs:
+ txin.binScript = ''
+
+ # 8. Script for the current input in the copy is set to subscript
+ txCopy.inputs[txInIndex].binScript = subscript
+
+ # 9. Prepare the signature and public key
+ senderAddr = PyBtcAddress().createFromPublicKey(binPubKey)
+ binHashCode = int_to_binary(hashtype, widthBytes=4)
+ toHash = txCopy.serialize() + binHashCode
+
+ # Hashes are computed as part of CppBlockUtils::CryptoECDSA methods
+ ##hashToVerify = hash256(toHash)
+ ##hashToVerify = binary_switchEndian(hashToVerify)
+
+ # 10. Apply ECDSA signature verification
+ if senderAddr.verifyDERSignature(toHash, justSig):
+ return True
+ else:
+ return False
+
+
+
+
+ def executeOpCode(self, opcode, scriptUnpacker, stack, stackAlt):
+ """
+ Executes the next OP_CODE given the current state of the stack(s)
+ """
+
+ # TODO: Gavin clarified the effects of OP_0, and OP_1-OP_16.
+ # OP_0 puts an empty string onto the stack, which evaluateses to
+ # false and is plugged into HASH160 as ''
+ # OP_X puts a single byte onto the stack, 0x01 to 0x10
+ #
+ # I haven't implemented it this way yet, because I'm still missing
+ # some details. Since this "works" for available scripts, I'm going
+ # to leave it alone for now.
+
+ ##########################################################################
+ ##########################################################################
+ ### This block produces very nice debugging output for script eval!
+ #def pr(s):
+ #if isinstance(s,int):
+ #return str(s)
+ #elif isinstance(s,str):
+ #if len(s)>8:
+ #return binary_to_hex(s)[:8]
+ #else:
+ #return binary_to_hex(s)
+
+ #print ' '.join([pr(i) for i in stack])
+ #print opnames[opcode][:12].ljust(12,' ') + ':',
+ ##########################################################################
+ ##########################################################################
+
+
+ stackSizeAtLeast = lambda n: (len(self.stack) >= n)
+
+ if opcode == OP_FALSE:
+ stack.append(0)
+ elif 0 < opcode < 76:
+ stack.append(scriptUnpacker.get(BINARY_CHUNK, opcode))
+ elif opcode == OP_PUSHDATA1:
+ nBytes = scriptUnpacker.get(UINT8)
+ stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
+ elif opcode == OP_PUSHDATA2:
+ nBytes = scriptUnpacker.get(UINT16)
+ stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
+ elif opcode == OP_PUSHDATA4:
+ nBytes = scriptUnpacker.get(UINT32)
+ stack.append(scriptUnpacker.get(BINARY_CHUNK, nBytes))
+ elif opcode == OP_1NEGATE:
+ stack.append(-1)
+ elif opcode == OP_TRUE:
+ stack.append(1)
+ elif 81 < opcode < 97:
+ stack.append(opcode-80)
+ elif opcode == OP_NOP:
+ pass
+
+ # TODO: figure out the conditional op codes...
+ elif opcode == OP_IF:
+ return OP_NOT_IMPLEMENTED
+ elif opcode == OP_NOTIF:
+ return OP_NOT_IMPLEMENTED
+ elif opcode == OP_ELSE:
+ return OP_NOT_IMPLEMENTED
+ elif opcode == OP_ENDIF:
+ return OP_NOT_IMPLEMENTED
+
+ elif opcode == OP_VERIFY:
+ if not self.castToBool(stack.pop()):
+ stack.append(0)
+ return TX_INVALID
+ elif opcode == OP_RETURN:
+ return TX_INVALID
+ elif opcode == OP_TOALTSTACK:
+ stackAlt.append( stack.pop() )
+ elif opcode == OP_FROMALTSTACK:
+ stack.append( stackAlt.pop() )
+
+ elif opcode == OP_IFDUP:
+ # Looks like this method duplicates the top item if it's not zero
+ if not stackSizeAtLeast(1): return SCRIPT_STACK_SIZE_ERROR
+ if self.castToBool(stack[-1]):
+ stack.append(stack[-1]);
+
+ elif opcode == OP_DEPTH:
+ stack.append( len(stack) )
+ elif opcode == OP_DROP:
+ stack.pop()
+ elif opcode == OP_DUP:
+ stack.append( stack[-1] )
+ elif opcode == OP_NIP:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ del stack[-2]
+ elif opcode == OP_OVER:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ stack.append(stack[-2])
+ elif opcode == OP_PICK:
+ n = stack.pop()
+ if not stackSizeAtLeast(n): return SCRIPT_STACK_SIZE_ERROR
+ stack.append(stack[-n])
+ elif opcode == OP_ROLL:
+ n = stack.pop()
+ if not stackSizeAtLeast(n): return SCRIPT_STACK_SIZE_ERROR
+ stack.append(stack[-(n+1)])
+ del stack[-(n+2)]
+ elif opcode == OP_ROT:
+ if not stackSizeAtLeast(3): return SCRIPT_STACK_SIZE_ERROR
+ stack.append( stack[-3] )
+ del stack[-4]
+ elif opcode == OP_SWAP:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ x2 = stack.pop()
+ x1 = stack.pop()
+ stack.extend([x2, x1])
+ elif opcode == OP_TUCK:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ x2 = stack.pop()
+ x1 = stack.pop()
+ stack.extend([x2, x1, x2])
+ elif opcode == OP_2DROP:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ stack.pop()
+ stack.pop()
+ elif opcode == OP_2DUP:
+ if not stackSizeAtLeast(2): return SCRIPT_STACK_SIZE_ERROR
+ stack.append( stack[-2] )
+ stack.append( stack[-2] )
+ elif opcode == OP_3DUP:
+ if not stackSizeAtLeast(3): return SCRIPT_STACK_SIZE_ERROR
+ stack.append( stack[-3] )
+ stack.append( stack[-3] )
+ stack.append( stack[-3] )
+ elif opcode == OP_2OVER:
+ if not stackSizeAtLeast(4): return SCRIPT_STACK_SIZE_ERROR
+ stack.append( stack[-4] )
+ stack.append( stack[-4] )
+ elif opcode == OP_2ROT:
+ if not stackSizeAtLeast(6): return SCRIPT_STACK_SIZE_ERROR
+ stack.append( stack[-6] )
+ stack.append( stack[-6] )
+ elif opcode == OP_2SWAP:
+ if not stackSizeAtLeast(4): return SCRIPT_STACK_SIZE_ERROR
+ x4 = stack.pop()
+ x3 = stack.pop()
+ x2 = stack.pop()
+ x1 = stack.pop()
+ stack.extend( [x3, x4, x1, x2] )
+ elif opcode == OP_CAT:
+ return OP_DISABLED
+ elif opcode == OP_SUBSTR:
+ return OP_DISABLED
+ elif opcode == OP_LEFT:
+ return OP_DISABLED
+ elif opcode == OP_RIGHT:
+ return OP_DISABLED
+ elif opcode == OP_SIZE:
+ if isinstance(stack[-1], int):
+ stack.append(0)
+ else:
+ stack.append( len(stack[-1]) )
+ elif opcode == OP_INVERT:
+ return OP_DISABLED
+ elif opcode == OP_AND:
+ return OP_DISABLED
+ elif opcode == OP_OR:
+ return OP_DISABLED
+ elif opcode == OP_XOR:
+ return OP_DISABLED
+ elif opcode == OP_EQUAL:
+ x1 = stack.pop()
+ x2 = stack.pop()
+ stack.append( 1 if x1==x2 else 0 )
+ elif opcode == OP_EQUALVERIFY:
+ x1 = stack.pop()
+ x2 = stack.pop()
+ if not x1==x2:
+ stack.append(0)
+ return TX_INVALID
+
+
+ elif opcode == OP_1ADD:
+ stack[-1] += 1
+ elif opcode == OP_1SUB:
+ stack[-1] -= 1
+ elif opcode == OP_2MUL:
+ stack[-1] *= 2
+ return OP_DISABLED
+ elif opcode == OP_2DIV:
+ stack[-1] /= 2
+ return OP_DISABLED
+ elif opcode == OP_NEGATE:
+ stack[-1] *= -1
+ elif opcode == OP_ABS:
+ stack[-1] = abs(stack[-1])
+ elif opcode == OP_NOT:
+ top = stack.pop()
+ if top==0:
+ stack.append(1)
+ else:
+ stack.append(0)
+ elif opcode == OP_0NOTEQUAL:
+ top = stack.pop()
+ if top==0:
+ stack.append(0)
+ else:
+ stack.append(1)
+ top = stack.pop()
+ if top==0:
+ stack.append(1)
+ else:
+ stack.append(0)
+ elif opcode == OP_ADD:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append(a+b)
+ elif opcode == OP_SUB:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append(a-b)
+ elif opcode == OP_MUL:
+ return OP_DISABLED
+ elif opcode == OP_DIV:
+ return OP_DISABLED
+ elif opcode == OP_MOD:
+ return OP_DISABLED
+ elif opcode == OP_LSHIFT:
+ return OP_DISABLED
+ elif opcode == OP_RSHIFT:
+ return OP_DISABLED
+ elif opcode == OP_BOOLAND:
+ b = stack.pop()
+ a = stack.pop()
+ if (not a==0) and (not b==0):
+ stack.append(1)
+ else:
+ stack.append(0)
+ elif opcode == OP_BOOLOR:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if (self.castToBool(a) or self.castToBool(b)) else 0 )
+ elif opcode == OP_NUMEQUAL:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if a==b else 0 )
+ elif opcode == OP_NUMEQUALVERIFY:
+ b = stack.pop()
+ a = stack.pop()
+ if not a==b:
+ stack.append(0)
+ return TX_INVALID
+ elif opcode == OP_NUMNOTEQUAL:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if not a==b else 0 )
+ elif opcode == OP_LESSTHAN:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if a<b else 0)
+ elif opcode == OP_GREATERTHAN:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if a>b else 0)
+ elif opcode == OP_LESSTHANOREQUAL:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if a<=b else 0)
+ elif opcode == OP_GREATERTHANOREQUAL:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( 1 if a>=b else 0)
+ elif opcode == OP_MIN:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( min(a,b) )
+ elif opcode == OP_MAX:
+ b = stack.pop()
+ a = stack.pop()
+ stack.append( max(a,b) )
+ elif opcode == OP_WITHIN:
+ xmax = stack.pop()
+ xmin = stack.pop()
+ x = stack.pop()
+ stack.append( 1 if (xmin <= x < xmax) else 0 )
+
+ elif opcode == OP_RIPEMD160:
+ bits = stack.pop()
+ stack.append( ripemd160(bits) )
+ elif opcode == OP_SHA1:
+ bits = stack.pop()
+ stack.append( sha1(bits) )
+ elif opcode == OP_SHA256:
+ bits = stack.pop()
+ stack.append( sha256(bits) )
+ elif opcode == OP_HASH160:
+ bits = stack.pop()
+ if isinstance(bits, int):
+ bits = ''
+ stack.append( hash160(bits) )
+ elif opcode == OP_HASH256:
+ bits = stack.pop()
+ if isinstance(bits, int):
+ bits = ''
+ stack.append( sha256(sha256(bits) ) )
+ elif opcode == OP_CODESEPARATOR:
+ self.lastOpCodeSepPos = scriptUnpacker.getPosition()
+ elif opcode == OP_CHECKSIG or opcode == OP_CHECKSIGVERIFY:
+
+ # 1. Pop key and sig from the stack
+ binPubKey = stack.pop()
+ binSig = stack.pop()
+
+ # 2-10. encapsulated in sep method so CheckMultiSig can use it too
+ txIsValid = self.checkSig( binSig, \
+ binPubKey, \
+ scriptUnpacker.getBinaryString(), \
+ self.txNew, \
+ self.txInIndex, \
+ self.lastOpCodeSepPos)
+ stack.append(1 if txIsValid else 0)
+ if opcode==OP_CHECKSIGVERIFY:
+ verifyCode = self.executeOpCode(OP_VERIFY)
+ if verifyCode == TX_INVALID:
+ return TX_INVALID
+
+ elif opcode == OP_CHECKMULTISIG or opcode == OP_CHECKMULTISIGVERIFY:
+ # OP_CHECKMULTISIG procedure ported directly from Satoshi client code
+ # Location: bitcoin-0.4.0-linux/src/src/script.cpp:775
+ i=1
+ if len(stack) < i:
+ return TX_INVALID
+
+ nKeys = int(stack[-i])
+ if nKeys < 0 or nKeys > 20:
+ return TX_INVALID
+
+ i += 1
+ iKey = i
+ i += nKeys
+ if len(stack) < i:
+ return TX_INVALID
+
+ nSigs = int(stack[-i])
+ if nSigs < 0 or nSigs > nKeys:
+ return TX_INVALID
+
+ iSig = i
+ i += 1
+ i += nSigs
+ if len(stack) < i:
+ return TX_INVALID
+
+ stack.pop()
+
+ # Apply the ECDSA verification to each of the supplied Sig-Key-pairs
+ enoughSigsMatch = True
+ while enoughSigsMatch and nSigs > 0:
+ binSig = stack[-iSig]
+ binKey = stack[-iKey]
+
+ if( self.checkSig(binSig, \
+ binKey, \
+ scriptUnpacker.getBinaryString(), \
+ self.txNew, \
+ self.txInIndex, \
+ self.lastOpCodeSepPos) ):
+ iSig += 1
+ nSigs -= 1
+
+ iKey +=1
+ nKeys -=1
+
+ if(nSigs > nKeys):
+ enoughSigsMatch = False
+
+ # Now pop the things off the stack, we only accessed in-place before
+ while i > 1:
+ i -= 1
+ stack.pop()
+
+
+ stack.append(1 if enoughSigsMatch else 0)
+
+ if opcode==OP_CHECKMULTISIGVERIFY:
+ verifyCode = self.executeOpCode(OP_VERIFY)
+ if verifyCode == TX_INVALID:
+ return TX_INVALID
+
+ else:
+ return SCRIPT_ERROR
+
+ return SCRIPT_NO_ERROR
+
+
+# Putting this at the end because of the circular dependency
+from armoryengine.PyBtcAddress import PyBtcAddress
diff --git a/armoryengine/Timer.py b/armoryengine/Timer.py
new file mode 100644
index 000000000..b3843115b
--- /dev/null
+++ b/armoryengine/Timer.py
@@ -0,0 +1,102 @@
+################################################################################
+#
+# Copyright (C) 2011-2014, Armory Technologies, Inc.
+# Distributed under the GNU Affero General Public License (AGPL v3)
+# See LICENSE or http://www.gnu.org/licenses/agpl.html
+#
+################################################################################
+#
+# Project: Armory
+# Author: Alan Reiner
+# Website: www.bitcoinarmory.com
+# Orig Date: 20 November, 2011
+#
+################################################################################
+from armoryengine.ArmoryUtils import LOGWARN, RightNow, LOGERROR
+
+class Timer(object):
+
+ ################################################################################
+ #
+ # Keep track of lots of different timers:
+ #
+ # Key: timerName
+ # Value: [cumulTime, numStart, lastStart, isRunning]
+ #
+ timerMap = {}
+
+ def startTimer(self, timerName):
+ if not self.timerMap.has_key(timerName):
+ self.timerMap[timerName] = [0, 0, 0, False]
+ timerEntry = self.timerMap[timerName]
+ timerEntry[1] += 1
+ timerEntry[2] = RightNow()
+ timerEntry[3] = True
+
+ def stopTimer(self, timerName):
+ if not self.timerMap.has_key(timerName):
+ LOGWARN('Requested stop timer that does not exist! (%s)' % timerName)
+ return
+ if not self.timerMap[timerName][3]:
+ LOGWARN('Requested stop timer that is not running! (%s)' % timerName)
+ return
+ timerEntry = self.timerMap[timerName]
+ timerEntry[0] += RightNow() - timerEntry[2]
+ timerEntry[2] = 0
+ timerEntry[3] = False
+
+ def resetTimer(self, timerName):
+ if not self.timerMap.has_key(timerName):
+ LOGERROR('Requested reset timer that does not exist! (%s)' % timerName)
+ # Even if it didn't exist, it will be created now
+ self.timerMap[timerName] = [0, 0, 0, False]
+
+ def readTimer(self, timerName):
+ if not self.timerMap.has_key(timerName):
+ LOGERROR('Requested read timer that does not exist! (%s)' % timerName)
+ return
+ timerEntry = self.timerMap[timerName]
+ return timerEntry[0] + (RightNow() - timerEntry[2])
+
+ def printTimings(self):
+ print 'Timings: '.ljust(30),
+ print 'nCall'.rjust(13),
+ print 'cumulTime'.rjust(13),
+ print 'avgTime'.rjust(13)
+ print '-'*70
+ for tname,quad in self.timerMap.iteritems():
+ print ('%s' % tname).ljust(30),
+ print ('%d' % quad[1]).rjust(13),
+ print ('%0.6f' % quad[0]).rjust(13),
+ avg = quad[0]/quad[1]
+ print ('%0.6f' % avg).rjust(13)
+ print '-'*70
+
+ def saveTimingsCSV(self, fname):
+ f = open(fname, 'w')
+ f.write( 'TimerName,')
+ f.write( 'nCall,')
+ f.write( 'cumulTime,')
+ f.write( 'avgTime\n\n')
+ for tname,quad in self.timerMap.iteritems():
+ f.write('%s,' % tname)
+ f.write('%d,' % quad[1])
+ f.write('%0.6f,' % quad[0])
+ avg = quad[0]/quad[1]
+ f.write('%0.6f\n' % avg)
+ f.write('\n\nNote: timings may be incorrect if errors '
+ 'were triggered in the timed functions')
+ print 'Saved timings to file: %s' % fname
+
+ def __init__(selfparams): # @NoSelf
+ pass
+
+
def TimeThisFunction(func):
   """
   Decorator: accumulate wall-clock time of every call to *func* under the
   function's own name in the shared Timer map.

   BUGFIX: the timer is now stopped in a finally-block, so an exception in
   the wrapped function no longer leaves the timer running (which silently
   corrupted all subsequent readings for that name).
   """
   timer = Timer()
   def inner(*args, **kwargs):
      timer.startTimer(func.__name__)
      try:
         return func(*args, **kwargs)
      finally:
         timer.stopTimer(func.__name__)
   # Preserve the wrapped function's identity for logging/debugging
   inner.__name__ = func.__name__
   inner.__doc__  = func.__doc__
   return inner
diff --git a/armoryengine/Transaction.py b/armoryengine/Transaction.py
new file mode 100644
index 000000000..8b22d5bf0
--- /dev/null
+++ b/armoryengine/Transaction.py
@@ -0,0 +1,1661 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+import logging
+import os
+
+import CppBlockUtils as Cpp
+from armoryengine.ArmoryUtils import *
+from armoryengine.BinaryPacker import *
+from armoryengine.BinaryUnpacker import *
+
+################################################################################
+# Identify all the codes/strings that are needed for dealing with scripts
+################################################################################
+# Start list of OP codes
# Raw single-byte opcode values of the bitcoin Script language.
# OP_0/OP_FALSE and OP_1/OP_TRUE are aliases for the same byte values.
OP_0 = 0
OP_FALSE = 0
# Explicit push opcodes (bytes 1-75 are implicit "push N bytes" opcodes)
OP_PUSHDATA1 = 76
OP_PUSHDATA2 = 77
OP_PUSHDATA4 = 78
OP_1NEGATE = 79
# Small-integer pushes: OP_1 (0x51) .. OP_16 (0x60)
OP_1 = 81
OP_TRUE = 81
OP_2 = 82
OP_3 = 83
OP_4 = 84
OP_5 = 85
OP_6 = 86
OP_7 = 87
OP_8 = 88
OP_9 = 89
OP_10 = 90
OP_11 = 91
OP_12 = 92
OP_13 = 93
OP_14 = 94
OP_15 = 95
OP_16 = 96
# Flow control
OP_NOP = 97
OP_IF = 99
OP_NOTIF = 100
OP_ELSE = 103
OP_ENDIF = 104
OP_VERIFY = 105
OP_RETURN = 106
# Stack manipulation
OP_TOALTSTACK = 107
OP_FROMALTSTACK = 108
OP_IFDUP = 115
OP_DEPTH = 116
OP_DROP = 117
OP_DUP = 118
OP_NIP = 119
OP_OVER = 120
OP_PICK = 121
OP_ROLL = 122
OP_ROT = 123
OP_SWAP = 124
OP_TUCK = 125
OP_2DROP = 109
OP_2DUP = 110
OP_3DUP = 111
OP_2OVER = 112
OP_2ROT = 113
OP_2SWAP = 114
# Splice ops (all but OP_SIZE are disabled by the protocol)
OP_CAT = 126
OP_SUBSTR = 127
OP_LEFT = 128
OP_RIGHT = 129
OP_SIZE = 130
# Bitwise logic (OP_INVERT/AND/OR/XOR are disabled by the protocol)
OP_INVERT = 131
OP_AND = 132
OP_OR = 133
OP_XOR = 134
OP_EQUAL = 135
OP_EQUALVERIFY = 136
# Arithmetic (several of these are disabled by the protocol)
OP_1ADD = 139
OP_1SUB = 140
OP_2MUL = 141
OP_2DIV = 142
OP_NEGATE = 143
OP_ABS = 144
OP_NOT = 145
OP_0NOTEQUAL = 146
OP_ADD = 147
OP_SUB = 148
OP_MUL = 149
OP_DIV = 150
OP_MOD = 151
OP_LSHIFT = 152
OP_RSHIFT = 153
OP_BOOLAND = 154
OP_BOOLOR = 155
OP_NUMEQUAL = 156
OP_NUMEQUALVERIFY = 157
OP_NUMNOTEQUAL = 158
OP_LESSTHAN = 159
OP_GREATERTHAN = 160
OP_LESSTHANOREQUAL = 161
OP_GREATERTHANOREQUAL = 162
OP_MIN = 163
OP_MAX = 164
OP_WITHIN = 165
# Crypto / signature checking
OP_RIPEMD160 = 166
OP_SHA1 = 167
OP_SHA256 = 168
OP_HASH160 = 169
OP_HASH256 = 170
OP_CODESEPARATOR = 171
OP_CHECKSIG = 172
OP_CHECKSIGVERIFY = 173
OP_CHECKMULTISIG = 174
OP_CHECKMULTISIGVERIFY = 175
+
# Reverse lookup: opcode byte value -> human-readable name ('' = unassigned).
opnames = [''] * 256
opnames[0] = 'OP_0'
# Byte values 1-75 implicitly push that many bytes
for _code in range(1, 76):
   opnames[_code] = 'OP_PUSHDATA'
# Small-integer pushes OP_1 (0x51) .. OP_16 (0x60)
for _n in range(1, 17):
   opnames[80 + _n] = 'OP_' + str(_n)
for _code, _name in [
      (76, 'OP_PUSHDATA1'),  (77, 'OP_PUSHDATA2'),  (78, 'OP_PUSHDATA4'),
      (79, 'OP_1NEGATE'),
      (97, 'OP_NOP'),        (99, 'OP_IF'),         (100, 'OP_NOTIF'),
      (103, 'OP_ELSE'),      (104, 'OP_ENDIF'),     (105, 'OP_VERIFY'),
      (106, 'OP_RETURN'),    (107, 'OP_TOALTSTACK'),(108, 'OP_FROMALTSTACK'),
      (109, 'OP_2DROP'),     (110, 'OP_2DUP'),      (111, 'OP_3DUP'),
      (112, 'OP_2OVER'),     (113, 'OP_2ROT'),      (114, 'OP_2SWAP'),
      (115, 'OP_IFDUP'),     (116, 'OP_DEPTH'),     (117, 'OP_DROP'),
      (118, 'OP_DUP'),       (119, 'OP_NIP'),       (120, 'OP_OVER'),
      (121, 'OP_PICK'),      (122, 'OP_ROLL'),      (123, 'OP_ROT'),
      (124, 'OP_SWAP'),      (125, 'OP_TUCK'),      (126, 'OP_CAT'),
      (127, 'OP_SUBSTR'),    (128, 'OP_LEFT'),      (129, 'OP_RIGHT'),
      (130, 'OP_SIZE'),      (131, 'OP_INVERT'),    (132, 'OP_AND'),
      (133, 'OP_OR'),        (134, 'OP_XOR'),       (135, 'OP_EQUAL'),
      (136, 'OP_EQUALVERIFY'),
      (139, 'OP_1ADD'),      (140, 'OP_1SUB'),      (141, 'OP_2MUL'),
      (142, 'OP_2DIV'),      (143, 'OP_NEGATE'),    (144, 'OP_ABS'),
      (145, 'OP_NOT'),       (146, 'OP_0NOTEQUAL'), (147, 'OP_ADD'),
      (148, 'OP_SUB'),       (149, 'OP_MUL'),       (150, 'OP_DIV'),
      (151, 'OP_MOD'),       (152, 'OP_LSHIFT'),    (153, 'OP_RSHIFT'),
      (154, 'OP_BOOLAND'),   (155, 'OP_BOOLOR'),    (156, 'OP_NUMEQUAL'),
      (157, 'OP_NUMEQUALVERIFY'),                   (158, 'OP_NUMNOTEQUAL'),
      (159, 'OP_LESSTHAN'),  (160, 'OP_GREATERTHAN'),
      (161, 'OP_LESSTHANOREQUAL'),                  (162, 'OP_GREATERTHANOREQUAL'),
      (163, 'OP_MIN'),       (164, 'OP_MAX'),       (165, 'OP_WITHIN'),
      (166, 'OP_RIPEMD160'), (167, 'OP_SHA1'),      (168, 'OP_SHA256'),
      (169, 'OP_HASH160'),   (170, 'OP_HASH256'),   (171, 'OP_CODESEPARATOR'),
      (172, 'OP_CHECKSIG'),  (173, 'OP_CHECKSIGVERIFY'),
      (174, 'OP_CHECKMULTISIG'), (175, 'OP_CHECKMULTISIGVERIFY')]:
   opnames[_code] = _name
+
+
# Forward lookup: opcode name -> byte value.  Note: the zero byte is keyed
# only as 'OP_FALSE' (there is no 'OP_0' key), and 'OP_TRUE' aliases 'OP_1'.
opCodeLookup = {}
for _n in range(1, 17):
   opCodeLookup['OP_' + str(_n)] = 80 + _n
opCodeLookup.update({
   'OP_FALSE': 0,
   'OP_TRUE': 81,
   'OP_PUSHDATA1': 76,
   'OP_PUSHDATA2': 77,
   'OP_PUSHDATA4': 78,
   'OP_1NEGATE': 79,
   'OP_NOP': 97,
   'OP_IF': 99,
   'OP_NOTIF': 100,
   'OP_ELSE': 103,
   'OP_ENDIF': 104,
   'OP_VERIFY': 105,
   'OP_RETURN': 106,
   'OP_TOALTSTACK': 107,
   'OP_FROMALTSTACK': 108,
   'OP_2DROP': 109,
   'OP_2DUP': 110,
   'OP_3DUP': 111,
   'OP_2OVER': 112,
   'OP_2ROT': 113,
   'OP_2SWAP': 114,
   'OP_IFDUP': 115,
   'OP_DEPTH': 116,
   'OP_DROP': 117,
   'OP_DUP': 118,
   'OP_NIP': 119,
   'OP_OVER': 120,
   'OP_PICK': 121,
   'OP_ROLL': 122,
   'OP_ROT': 123,
   'OP_SWAP': 124,
   'OP_TUCK': 125,
   'OP_CAT': 126,
   'OP_SUBSTR': 127,
   'OP_LEFT': 128,
   'OP_RIGHT': 129,
   'OP_SIZE': 130,
   'OP_INVERT': 131,
   'OP_AND': 132,
   'OP_OR': 133,
   'OP_XOR': 134,
   'OP_EQUAL': 135,
   'OP_EQUALVERIFY': 136,
   'OP_1ADD': 139,
   'OP_1SUB': 140,
   'OP_2MUL': 141,
   'OP_2DIV': 142,
   'OP_NEGATE': 143,
   'OP_ABS': 144,
   'OP_NOT': 145,
   'OP_0NOTEQUAL': 146,
   'OP_ADD': 147,
   'OP_SUB': 148,
   'OP_MUL': 149,
   'OP_DIV': 150,
   'OP_MOD': 151,
   'OP_LSHIFT': 152,
   'OP_RSHIFT': 153,
   'OP_BOOLAND': 154,
   'OP_BOOLOR': 155,
   'OP_NUMEQUAL': 156,
   'OP_NUMEQUALVERIFY': 157,
   'OP_NUMNOTEQUAL': 158,
   'OP_LESSTHAN': 159,
   'OP_GREATERTHAN': 160,
   'OP_LESSTHANOREQUAL': 161,
   'OP_GREATERTHANOREQUAL': 162,
   'OP_MIN': 163,
   'OP_MAX': 164,
   'OP_WITHIN': 165,
   'OP_RIPEMD160': 166,
   'OP_SHA1': 167,
   'OP_SHA256': 168,
   'OP_HASH160': 169,
   'OP_HASH256': 170,
   'OP_CODESEPARATOR': 171,
   'OP_CHECKSIG': 172,
   'OP_CHECKSIGVERIFY': 173,
   'OP_CHECKMULTISIG': 174,
   'OP_CHECKMULTISIGVERIFY': 175,
})
+#Word Opcode Description
+#OP_PUBKEYHASH = 253 Represents a public key hashed with OP_HASH160.
+#OP_PUBKEY = 254 Represents a public key compatible with OP_CHECKSIG.
+#OP_INVALIDOPCODE = 255 Matches any opcode that is not yet assigned.
+#[edit] Reserved words
+#Any opcode not assigned is also reserved. Using an unassigned opcode makes the transaction invalid.
+#Word Opcode When used...
+#OP_RESERVED = 80 Transaction is invalid
+#OP_VER = 98 Transaction is invalid
+#OP_VERIF = 101 Transaction is invalid
+#OP_VERNOTIF = 102 Transaction is invalid
+#OP_RESERVED1 = 137 Transaction is invalid
+#OP_RESERVED2 = 138 Transaction is invalid
+#OP_NOP1 = OP_NOP10 176-185 The word is ignored.
+
+
+################################################################################
def getOpCode(name):
   """Return the 1-byte binary encoding of the named opcode (e.g. 'OP_DUP').

   Raises KeyError for names not in opCodeLookup (note: 'OP_0' is keyed
   only as 'OP_FALSE').
   """
   return int_to_binary(opCodeLookup[name], widthBytes=1)
+
+
+################################################################################
def getMultisigScriptInfo(rawScript):
   """
   Gets the Multi-Sig tx type, as well as all the address-160 strings of
   the keys that are needed to satisfy this transaction. This currently
   only identifies M-of-N transaction types, returning unknown otherwise.

   However, the address list it returns should be valid regardless of
   whether the type was unknown: we assume all 20-byte chunks of data
   are public key hashes, and 65-byte chunks are public keys.

   M==0 (output[0]==0) indicates this isn't a multisig script

   Returns (M, N, addr160List, pubKeyList).
   """

   scrAddr = ''        # NOTE(review): assigned but never used below
   addr160List = []
   pubKeyList   = []

   M,N = 0,0

   # C++ helper packs the result as: uint8 M, uint8 N, then the N pubkeys.
   pubKeyStr  = Cpp.BtcUtils().getMultisigPubKeyInfoStr(rawScript)

   bu = BinaryUnpacker(pubKeyStr)
   M = bu.get(UINT8)
   N = bu.get(UINT8)

   if M==0:
      # Not recognized as multisig
      return [0, 0, None, None]

   for i in range(N):
      # Read a 33-byte (compressed-size) key; a 0x04 lead byte marks an
      # uncompressed 65-byte key, so pull the remaining 32 bytes too.
      pkstr = bu.get(BINARY_CHUNK, 33)
      if pkstr[0] == '\x04':
         pkstr += bu.get(BINARY_CHUNK, 32)
      pubKeyList.append(pkstr)
      addr160List.append(hash160(pkstr))

   return M, N, addr160List, pubKeyList
+
+
+################################################################################
def getHash160ListFromMultisigScrAddr(scrAddr):
   """
   Split a multisig scrAddr (1-byte prefix + 2 header bytes + N*20 bytes of
   key hashes) into its list of 20-byte hash160 strings.

   Raises BadAddressError when the prefix or the payload length is wrong.
   """
   # Payload after the 3 header bytes must be a whole number of hash160s
   if len(scrAddr) % 20 != 3 or not scrAddr[0] == SCRADDR_MULTISIG_BYTE:
      raise BadAddressError('Supplied ScrAddr is not multisig!')

   keyData = scrAddr[3:]
   return [keyData[pos:pos + 20] for pos in range(0, len(keyData), 20)]
+
+
+################################################################################
+# These two methods are just easier-to-type wrappers around the C++ methods
def getTxOutScriptType(script):
   """Return the CPP_TXOUT_* integer type code for a raw TxOut script."""
   return Cpp.BtcUtils().getTxOutScriptTypeInt(script)
+
+
+################################################################################
def getTxInScriptType(txinObj):
   """
   Return the CPP_TXIN_* integer type code for this input's script.

   NOTE: this method takes a TXIN object, not just the script itself,
   because the OutPoint's previous-tx hash is needed to distinguish an
   UNKNOWN TxIn from a coinbase-TxIn.
   """
   return Cpp.BtcUtils().getTxInScriptTypeInt(txinObj.binScript,
                                              txinObj.outpoint.txHash)
+
+################################################################################
def getTxInP2SHScriptType(txinObj):
   """
   If this TxIn is identified as SPEND-P2SH, it carries a subscript that is
   really a TxOut script (which must hash to the value on the actual TxOut
   script).  Return that subscript's TxOut type, or None for non-P2SH
   inputs.
   """
   if getTxInScriptType(txinObj) == CPP_TXIN_SPENDP2SH:
      subscript = Cpp.BtcUtils().getLastPushDataInScript(txinObj.binScript)
      return getTxOutScriptType(subscript)
   return None
+
+
+################################################################################
def TxInExtractAddrStrIfAvail(txinObj):
   """
   Return the address string implied by this TxIn's script (the hash160 of
   its final data push, rendered as a standard or P2SH address), or '' when
   the script type does not reveal one.
   """
   btcUtils  = Cpp.BtcUtils()
   scrType   = btcUtils.getTxInScriptTypeInt(txinObj.binScript,
                                             txinObj.outpoint.txHash)
   finalPush = btcUtils.getLastPushDataInScript(txinObj.binScript)

   if scrType in (CPP_TXIN_STDUNCOMPR, CPP_TXIN_STDCOMPR):
      return hash160_to_addrStr( hash160(finalPush) )
   if scrType == CPP_TXIN_SPENDP2SH:
      return hash160_to_p2shStr( hash160(finalPush) )
   return ''
+
+
+################################################################################
def TxInExtractPreImageIfAvail(txinObj):
   """
   Return the final data push of this TxIn's script -- the hash preimage
   (public key, or P2SH subscript) for standard spend types -- or '' for
   any other script type.
   """
   rawScript  = txinObj.binScript
   prevTxHash = txinObj.outpoint.txHash
   scrType = Cpp.BtcUtils().getTxInScriptTypeInt(rawScript, prevTxHash)

   # BUGFIX: was `scrType == [ ... ]`, comparing an int to a list, which is
   # never true -- the function always returned ''.  Membership test was
   # intended, mirroring TxInExtractAddrStrIfAvail above.
   if scrType in [CPP_TXIN_STDUNCOMPR, CPP_TXIN_STDCOMPR, CPP_TXIN_SPENDP2SH]:
      return Cpp.BtcUtils().getLastPushDataInScript(rawScript)
   else:
      return ''
+
+
+
+
+# Finally done with all the base conversion functions and ECDSA code
+# Now define the classes for the objects that will use this
+
+
+################################################################################
+# Transaction Classes
+################################################################################
+
+
+#####
class BlockComponent(object):
   """
   Base class for every serializable block element (tx, txin, txout, ...).

   Subclasses must implement serialize() and unserialize(); copy() then
   works for free by round-tripping through the wire format.
   """

   def serialize(self):
      # Subclass responsibility
      raise NotImplementedError

   def unserialize(self):
      # Subclass responsibility
      raise NotImplementedError

   def copy(self):
      """Deep-copy via serialize/unserialize round trip."""
      return self.__class__().unserialize(self.serialize())
+
+################################################################################
class PyOutPoint(BlockComponent):
   """
   Reference to a specific TxOut: the 32-byte hash of the transaction that
   created it plus the output index within that transaction.
   """
   #def __init__(self, txHash, txOutIndex):
      #self.txHash = txHash
      #self.txOutIndex = outIndex

   def unserialize(self, toUnpack):
      """Parse the 36-byte wire format from a string or BinaryUnpacker;
      returns self.  Raises UnserializeError if fewer than 36 bytes remain."""
      if isinstance(toUnpack, BinaryUnpacker):
         opData = toUnpack
      else:
         opData = BinaryUnpacker( toUnpack )

      # Fixed layout: 32-byte prev-tx hash + 4-byte output index
      if opData.getRemainingSize() < 36: raise UnserializeError
      self.txHash = opData.get(BINARY_CHUNK, 32)
      self.txOutIndex = opData.get(UINT32)
      return self

   def serialize(self):
      """Serialize to the 36-byte wire format (txHash + txOutIndex)."""
      binOut = BinaryPacker()
      binOut.put(BINARY_CHUNK, self.txHash)
      binOut.put(UINT32, self.txOutIndex)
      return binOut.getBinaryString()

   def pprint(self, nIndent=0, endian=BIGENDIAN):
      """Print a human-readable dump ('indent' is a module-level constant)."""
      indstr = indent*nIndent
      print indstr + 'OutPoint:'
      print indstr + indent + 'PrevTxHash:', \
                  binary_to_hex(self.txHash, endian), \
                  '(BE)' if endian==BIGENDIAN else '(LE)'
      print indstr + indent + 'TxOutIndex:', self.txOutIndex
+
+
+#####
class PyTxIn(BlockComponent):
   """
   One transaction input: the outpoint being spent, the scriptSig bytes,
   and the sequence number.
   """
   def __init__(self):
      self.outpoint = UNINITIALIZED    # PyOutPoint of the TxOut being spent
      self.binScript = UNINITIALIZED   # raw scriptSig bytes
      self.intSeq = 2**32-1            # default: final (max) sequence

   def unserialize(self, toUnpack):
      """Parse a wire-format TxIn from a string or BinaryUnpacker; returns
      self.  Raises UnserializeError if the buffer is too short."""
      if isinstance(toUnpack, BinaryUnpacker):
         txInData = toUnpack
      else:
         txInData = BinaryUnpacker( toUnpack )

      self.outpoint = PyOutPoint().unserialize( txInData.get(BINARY_CHUNK, 36) )

      scriptSize = txInData.get(VAR_INT)
      # Must have room for the script plus the trailing 4-byte sequence
      if txInData.getRemainingSize() < scriptSize+4: raise UnserializeError
      self.binScript = txInData.get(BINARY_CHUNK, scriptSize)
      self.intSeq = txInData.get(UINT32)
      return self

   def getScript(self):
      """Return the raw scriptSig bytes."""
      return self.binScript

   def serialize(self):
      """Serialize to wire format: outpoint + varint-prefixed script + seq."""
      binOut = BinaryPacker()
      binOut.put(BINARY_CHUNK, self.outpoint.serialize() )
      binOut.put(VAR_INT, len(self.binScript))
      binOut.put(BINARY_CHUNK, self.binScript)
      binOut.put(UINT32, self.intSeq)
      return binOut.getBinaryString()

   def pprint(self, nIndent=0, endian=BIGENDIAN):
      """Print a human-readable dump of this TxIn to stdout."""
      indstr = indent*nIndent
      print indstr + 'PyTxIn:'
      print indstr + indent + 'PrevTxHash:', \
                  binary_to_hex(self.outpoint.txHash, endian), \
                  '(BE)' if endian==BIGENDIAN else '(LE)'
      print indstr + indent + 'TxOutIndex:', self.outpoint.txOutIndex
      print indstr + indent + 'Script:    ', \
                  '('+binary_to_hex(self.binScript)[:64]+')'
      addrStr = TxInExtractAddrStrIfAvail(self)
      if len(addrStr)==0:
         addrStr = ''
      print indstr + indent + 'Sender:    ', addrStr
      print indstr + indent + 'Seq:       ', self.intSeq

   def toString(self, nIndent=0, endian=BIGENDIAN):
      """Like pprint, but returns the dump as a string (Sender line is
      omitted when no address can be extracted)."""
      indstr = indent*nIndent
      indstr2 = indstr + indent
      result = indstr + 'PyTxIn:'
      result = ''.join([result, '\n', indstr2 + 'PrevTxHash:', \
                  binary_to_hex(self.outpoint.txHash, endian), \
                  '(BE)' if endian==BIGENDIAN else '(LE)'])
      result = ''.join([result, '\n', indstr2 + 'TxOutIndex:', \
                  str(self.outpoint.txOutIndex)])
      result = ''.join([result, '\n', indstr2 + 'Script:    ', \
                  '('+binary_to_hex(self.binScript)[:64]+')'])
      addrStr = TxInExtractAddrStrIfAvail(self)
      if len(addrStr)>0:
         result = ''.join([result, '\n', indstr2 + 'Sender:    ', addrStr])
      result = ''.join([result, '\n', indstr2 + 'Seq:       ', str(self.intSeq)])
      return result

   # Before broadcasting a transaction make sure that the script is canonical
   # This TX could have been signed by an older version of the software.
   # Either on the offline Armory installation which may not have been upgraded
   # or on a previous installation of Armory on this computer.
   def minimizeDERSignaturePadding(self):
      """Return (paddingRemoved, newTxIn): a copy of this TxIn whose DER
      signature is re-encoded with minimal padding, plus a flag saying
      whether anything changed.

      NOTE(review): the fixed offsets below assume the scriptSig starts
      with PUSH(sig) and a standard DER layout (r-length at byte 4,
      s-length after r) -- confirm against createSigScriptFromRS before
      modifying."""
      rsLen = binary_to_int(self.binScript[2:3])
      rLen = binary_to_int(self.binScript[4:5])
      rBin = self.binScript[5:5+rLen]
      sLen = binary_to_int(self.binScript[6+rLen:7+rLen])
      sBin = self.binScript[7+rLen:7+rLen+sLen]
      sigScript = createSigScriptFromRS(rBin, sBin)
      newBinScript = int_to_binary(len(sigScript)+1) + sigScript + self.binScript[3+rsLen:]
      paddingRemoved = newBinScript != self.binScript
      newTxIn = self.copy()
      newTxIn.binScript = newBinScript
      return paddingRemoved, newTxIn
+
+
+#####
class PyTxOut(BlockComponent):
   """
   One transaction output: value in satoshis plus the raw TxOut script.
   """
   def __init__(self):
      self.value = UNINITIALIZED       # output value in satoshis
      self.binScript = UNINITIALIZED   # raw scriptPubKey bytes

   def unserialize(self, toUnpack):
      """Parse a wire-format TxOut from a string or BinaryUnpacker; returns
      self.  Raises UnserializeError if the buffer is too short."""
      if isinstance(toUnpack, BinaryUnpacker):
         txOutData = toUnpack
      else:
         txOutData = BinaryUnpacker( toUnpack )

      self.value = txOutData.get(UINT64)
      scriptSize = txOutData.get(VAR_INT)
      if txOutData.getRemainingSize() < scriptSize: raise UnserializeError
      self.binScript = txOutData.get(BINARY_CHUNK, scriptSize)
      return self

   def getValue(self):
      """Return the output value in satoshis."""
      return self.value

   def getScript(self):
      """Return the raw TxOut script bytes."""
      return self.binScript

   def serialize(self):
      """Serialize to wire format: uint64 value + varint-prefixed script."""
      binOut = BinaryPacker()
      binOut.put(UINT64, self.value)
      binOut.put(VAR_INT, len(self.binScript))
      binOut.put(BINARY_CHUNK, self.binScript)
      return binOut.getBinaryString()

   def pprint(self, nIndent=0, endian=BIGENDIAN):
      """Print a human-readable dump to stdout (delegates to toString)."""
      print self.toString(nIndent, endian)

   def toString(self, nIndent=0, endian=BIGENDIAN):
      """Return a human-readable dump of this TxOut as a string."""
      indstr = indent*nIndent
      indstr2 = indent*nIndent + indent
      valStr, btcStr = str(self.value), str(float(self.value)/ONE_BTC)
      result = indstr + 'TxOut:\n'
      result += indstr2 + 'Value:   %s (%s)\n' % (valStr, btcStr)
      result += indstr2
      txoutType = getTxOutScriptType(self.binScript)

      # Render the script per recognized type; fall back to raw op names
      if txoutType in [CPP_TXOUT_STDPUBKEY33, CPP_TXOUT_STDPUBKEY65]:
         result += 'Script:  PubKey(%s) OP_CHECKSIG \n' % \
                                    script_to_addrStr(self.binScript)
      elif txoutType == CPP_TXOUT_STDHASH160:
         result += 'Script:  OP_DUP OP_HASH160 (%s) OP_EQUALVERIFY OP_CHECKSIG' % \
                                    script_to_addrStr(self.binScript)
      elif txoutType == CPP_TXOUT_P2SH:
         result += 'Script:  OP_HASH160 (%s) OP_EQUAL' % \
                                    script_to_addrStr(self.binScript)
      else:
         opStrList = convertScriptToOpStrings(self.binScript)
         result += 'Script:  ' + ' '.join(opStrList)

      return result
+
+#####
+class PyTx(BlockComponent):
+ def __init__(self):
+ self.version = UNINITIALIZED
+ self.inputs = UNINITIALIZED
+ self.outputs = UNINITIALIZED
+ self.lockTime = 0
+ self.thisHash = UNINITIALIZED
+
+ def serialize(self):
+ binOut = BinaryPacker()
+ binOut.put(UINT32, self.version)
+ binOut.put(VAR_INT, len(self.inputs))
+ for txin in self.inputs:
+ binOut.put(BINARY_CHUNK, txin.serialize())
+ binOut.put(VAR_INT, len(self.outputs))
+ for txout in self.outputs:
+ binOut.put(BINARY_CHUNK, txout.serialize())
+ binOut.put(UINT32, self.lockTime)
+ return binOut.getBinaryString()
+
+ def unserialize(self, toUnpack):
+ if isinstance(toUnpack, BinaryUnpacker):
+ txData = toUnpack
+ else:
+ txData = BinaryUnpacker( toUnpack )
+
+ startPos = txData.getPosition()
+ self.inputs = []
+ self.outputs = []
+ self.version = txData.get(UINT32)
+ numInputs = txData.get(VAR_INT)
+ for i in xrange(numInputs):
+ self.inputs.append( PyTxIn().unserialize(txData) )
+ numOutputs = txData.get(VAR_INT)
+ for i in xrange(numOutputs):
+ self.outputs.append( PyTxOut().unserialize(txData) )
+ self.lockTime = txData.get(UINT32)
+ endPos = txData.getPosition()
+ self.nBytes = endPos - startPos
+ self.thisHash = hash256(self.serialize())
+ return self
+
+ # Before broadcasting a transaction make sure that the script is canonical
+ # This TX could have been signed by an older version of the software.
+ # Either on the offline Armory installation which may not have been upgraded
+ # or on a previous installation of Armory on this computer.
+ def minimizeDERSignaturePadding(self):
+ paddingRemoved = False
+ newTx = self.copy()
+ newTx.inputs = []
+ for txIn in self.inputs:
+ paddingRemovedFromTxIn, newTxIn = txIn.minimizeDERSignaturePadding()
+ if paddingRemovedFromTxIn:
+ paddingRemoved = True
+ newTx.inputs.append(newTxIn)
+ else:
+ newTx.inputs.append(txIn)
+
+ return paddingRemoved, newTx.copy()
+
+ def getHash(self):
+ return hash256(self.serialize())
+
+ def getHashHex(self, endianness=LITTLEENDIAN):
+ return binary_to_hex(self.getHash(), endOut=endianness)
+
+ def makeRecipientsList(self):
+ """
+ Make a list of lists, each one containing information about
+ an output in this tx. Usually contains
+ [ScriptType, Value, Script]
+ May include more information if any of the scripts are multi-sig,
+ such as public keys and multi-sig type (M-of-N)
+ """
+ recipInfoList = []
+ for txout in self.outputs:
+ recipInfoList.append([])
+
+ scrType = getTxOutScriptType(txout.binScript)
+ recipInfoList[-1].append(scrType)
+ recipInfoList[-1].append(txout.value)
+ recipInfoList[-1].append(txout.binScript)
+ if scrType == CPP_TXOUT_MULTISIG:
+ recipInfoList[-1].append(getMultisigScriptInfo(txout.binScript))
+ else:
+ recipInfoList[-1].append([])
+
+ return recipInfoList
+
+
+ def pprint(self, nIndent=0, endian=BIGENDIAN):
+ indstr = indent*nIndent
+ print indstr + 'Transaction:'
+ print indstr + indent + 'TxHash: ', self.getHashHex(endian), \
+ '(BE)' if endian==BIGENDIAN else '(LE)'
+ print indstr + indent + 'Version: ', self.version
+ print indstr + indent + 'nInputs: ', len(self.inputs)
+ print indstr + indent + 'nOutputs: ', len(self.outputs)
+ print indstr + indent + 'LockTime: ', self.lockTime
+ print indstr + indent + 'Inputs: '
+ for inp in self.inputs:
+ inp.pprint(nIndent+2, endian=endian)
+ print indstr + indent + 'Outputs: '
+ for out in self.outputs:
+ out.pprint(nIndent+2, endian=endian)
+
+ def toString(self, nIndent=0, endian=BIGENDIAN):
+ indstr = indent*nIndent
+ result = indstr + 'Transaction:'
+ result = ''.join([result, '\n', indstr + indent + 'TxHash: ', self.getHashHex(endian), \
+ '(BE)' if endian==BIGENDIAN else '(LE)'])
+ result = ''.join([result, '\n', indstr + indent + 'Version: ', str(self.version)])
+ result = ''.join([result, '\n', indstr + indent + 'nInputs: ', str(len(self.inputs))])
+ result = ''.join([result, '\n', indstr + indent + 'nOutputs: ', str(len(self.outputs))])
+ result = ''.join([result, '\n', indstr + indent + 'LockTime: ', str(self.lockTime)])
+ result = ''.join([result, '\n', indstr + indent + 'Inputs: '])
+ for inp in self.inputs:
+ result = ''.join([result, '\n', inp.toString(nIndent+2, endian=endian)])
+ print indstr + indent + 'Outputs: '
+ for out in self.outputs:
+ result = ''.join([result, '\n', out.toString(nIndent+2, endian=endian)])
+ return result
+
+ def fetchCpp(self):
+ """ Use the info in this PyTx to get the C++ version from TheBDM """
+ return TheBDM.getTxByHash(self.getHash())
+
+ def pprintHex(self, nIndent=0):
+ bu = BinaryUnpacker(self.serialize())
+ theSer = self.serialize()
+ print binary_to_hex(bu.get(BINARY_CHUNK, 4))
+ nTxin = bu.get(VAR_INT)
+ print 'VAR_INT(%d)' % nTxin
+ for i in range(nTxin):
+ print binary_to_hex(bu.get(BINARY_CHUNK,32))
+ print binary_to_hex(bu.get(BINARY_CHUNK,4))
+ scriptSz = bu.get(VAR_INT)
+ print 'VAR_IN(%d)' % scriptSz
+ print binary_to_hex(bu.get(BINARY_CHUNK,scriptSz))
+ print binary_to_hex(bu.get(BINARY_CHUNK,4))
+ nTxout = bu.get(VAR_INT)
+ print 'VAR_INT(%d)' % nTxout
+ for i in range(nTxout):
+ print binary_to_hex(bu.get(BINARY_CHUNK,8))
+ scriptSz = bu.get(VAR_INT)
+ print binary_to_hex(bu.get(BINARY_CHUNK,scriptSz))
+ print binary_to_hex(bu.get(BINARY_CHUNK, 4))
+
+
+
+
+
+
+################################################################################
+################################################################################
+# This class can be used for both multi-signature tx collection, as well as
+# offline wallet signing (you are collecting signatures for a 1-of-1 tx only
+# involving yourself).
+class PyTxDistProposal(object):
+   """
+   PyTxDistProposal is created from a PyTx object, and represents
+   an unsigned transaction, that may require the signatures of
+   multiple parties before being accepted by the network.
+
+   The idea of this technique (https://en.bitcoin.it/wiki/BIP_0010) is
+   that once the TxDP is created, the system signing it only needs the
+   ECDSA private keys and nothing else.  This enables the device
+   providing the signatures to be extremely lightweight, since it
+   doesn't have to store the blockchain.
+
+   For a given TxDP, we will be storing the following structure
+   in memory.  Use a 4-input tx as an example, with the first
+   being a 2-of-3 multi-sig transaction (unsigned), and the last
+   is a 2-of-2 P2SH input.
+
+      self.scriptTypes    = [ CPP_TXOUT_MULTISIG,
+                              CPP_TXOUT_STDHASH160,
+                              CPP_TXOUT_STDHASH160,
+                              CPP_TXOUT_P2SH]
+
+      self.inputValues    = [ long(23.13 * ONE_BTC),
+                              long( 4.00 * ONE_BTC),
+                              long(10.00 * ONE_BTC),
+                              long( 5.00 * ONE_BTC) ]
+
+      self.signatures     = [ ['', '', ''],
+                              [''],
+                              [''],
+                              [''], ]
+
+      self.inScrAddrList  = [ fe0203,
+                              HASH160PREFIX + a160_4,
+                              HASH160PREFIX + a160_5,
+                              P2SHPREFIX + p2sh160 ]
+
+      self.p2shScripts    = [ '',
+                              '',
+                              '',
+                              ]
+
+      # Usually only have public keys on multi-sig TxOuts
+      self.inPubKeyLists  = [ [pubKey1, pubKey2, pubKey3],
+                              [''],
+                              [''],
+                              [pubKey6, pubKey7] ]
+
+      self.numSigsNeeded  = [ 2,
+                              1,
+                              1,
+                              2 ]
+
+      self.relevantTxMap  = { prevTx0Hash: prevTx0.serialize(),
+                              prevTx1Hash: prevTx1.serialize(),
+                              prevTx2Hash: prevTx2.serialize(),
+                              prevTx3Hash: prevTx3.serialize() }
+
+   UPDATE Feb 2012:  Before Jan 29, 2012, BIP 0010 used a different technique
+                     for communicating blockchain information to the offline
+                     device.  This is no longer the case
+
+                     Gregory Maxwell identified a reasonable-enough security
+                     risk with the fact that previous BIP 0010 cannot guarantee
+                     validity of stated input values in a TxDP.  This is solved
+                     by adding the supporting transactions to the TxDP, so that
+                     the signing device can get the input values from those
+                     tx and verify the hash matches the OutPoint on the tx
+                     being signed (which *is* part of what's being signed).
+                     The concern was that someone could manipulate your online
+                     computer to misrepresent the inputs, and cause you to
+                     send your entire wallet to tx-fees.  Not the most useful
+                     attack (for someone trying to steal your coins), but it is
+                     still a risk that can be avoided by adding some "bloat" to
+                     the TxDP
+   """
+   #############################################################################
+   def __init__(self, pytx=None, txMap={}):
+      # NOTE(review): mutable default arg txMap={} -- appears to be read-only
+      # downstream (createFromPyTx only reads it), so harmless, but worth
+      # confirming and cleaning up
+      self.pytxObj       = UNINITIALIZED   # the underlying unsigned PyTx
+      self.uniqueB58     = ''              # short base58 ID of this proposal
+      self.scriptTypes   = []              # CPP_TXOUT_* type, one per input
+      self.signatures    = []              # list of sig-slot lists, per input
+      self.txOutScripts  = []              # script of the TxOut each input spends
+      self.inScrAddrList = []              # scrAddr (or list for multisig) per input
+      self.p2shScripts   = []              # serialized subscript for P2SH inputs
+      self.inPubKeyLists = []              # pubkeys per input (multisig only)
+      self.inputValues   = []              # value of each spent TxOut
+      self.numSigsNeeded = []              # sigs required per input (M of M-of-N)
+      self.relevantTxMap = {}  # needed to support input values of each TxIn
+      if pytx:
+         self.createFromPyTx(pytx, txMap)
+
+ #############################################################################
+ def createFromPyTx(self, pytx, txMap={}, p2shMap={}):
+ sz = len(pytx.inputs)
+ self.pytxObj = pytx.copy()
+ self.uniqueB58 = binary_to_base58(hash256(pytx.serialize()))[:8]
+ self.scriptTypes = []
+ self.signatures = []
+ self.txOutScripts = []
+ self.inScrAddrList = []
+ self.inPubKeyLists = []
+ self.inputValues = []
+ self.numSigsNeeded = []
+ self.relevantTxMap = {} # needed to support input values of each TxIn
+ self.p2shScripts = []
+
+ if len(txMap)==0 and not TheBDM.getBDMState()=='BlockchainReady':
+ # TxDP includes the transactions that supply the inputs to this
+ # transaction, so the BDM needs to be available to fetch those.
+ raise BlockchainUnavailableError, ('Must input supporting transactions '
+ 'or access to the blockchain, to '
+ 'create the TxDP')
+ for i in range(sz):
+ # First, make sure that we have the previous Tx data available
+ # We can't continue without it, since BIP 0010 will now require
+ # the full tx of outputs being spent
+ outpt = self.pytxObj.inputs[i].outpoint
+ txhash = outpt.txHash
+ txidx = outpt.txOutIndex
+ pyPrevTx = None
+ if len(txMap)>0:
+ # If supplied a txMap, we expect it to have everything we need
+ if not txMap.has_key(txhash):
+ raise InvalidHashError, ('Could not find the referenced tx '
+ 'in supplied txMap')
+ pyPrevTx = txMap[txhash].copy()
+ elif TheBDM.getBDMState()=='BlockchainReady':
+ cppPrevTx = TheBDM.getTxByHash(txhash)
+ if not cppPrevTx:
+ raise InvalidHashError, 'Could not find the referenced tx'
+ pyPrevTx = PyTx().unserialize(cppPrevTx.serialize())
+ else:
+ raise InvalidScriptError, 'No previous-tx data available for TxDP'
+
+ self.relevantTxMap[txhash] = pyPrevTx.copy()
+
+
+ # Now we have the previous transaction. We need to pull the
+ # script out of the specific TxOut so we know how it can be
+ # spent.
+ script = pyPrevTx.outputs[txidx].binScript
+ value = pyPrevTx.outputs[txidx].value
+ scrType = getTxOutScriptType(script)
+
+ self.inputValues.append(value)
+ self.txOutScripts.append(str(script)) # copy it
+ self.scriptTypes.append(scrType)
+
+ # Make sure we always add an element to each of these
+ self.numSigsNeeded.append(-1)
+ self.inScrAddrList.append('')
+ self.p2shScripts.append('')
+ self.inPubKeyLists.append([])
+ self.signatures.append([])
+
+ # If this is a P2SH TxOut being spent, we store the information
+ # about the SUBSCRIPT, since that is ultimately what needs to be
+ # "solved" to spend the coins. The fact that self.p2shScripts[i]
+ # will be non-empty is how we know we need to add the serialized
+ # SUBSCRIPT to the end of the sigScript when signing.
+ if scrType==CPP_TXOUT_P2SH:
+ p2shScrAddr = script_to_scrAddr(script)
+
+ if not p2shMap.has_key(p2shScrAddr):
+ raise InvalidScriptError, 'No P2SH script info avail for TxDP'
+ else:
+ scriptHash = hash160(p2shMap[p2shScrAddr])
+ if not SCRADDR_P2SH_BYTE+scriptHash == p2shScrAddr:
+ raise InvalidScriptError, 'No P2SH script info avail for TxDP'
+
+ script = p2shMap[p2shScrAddr]
+ self.p2shScripts[-1] = script
+
+ scrType = getTxOutScriptType(script)
+ self.scriptTypes[-1] = scrType
+
+
+ # Fill some of the other fields with info needed to spend the script
+ if scrType==CPP_TXOUT_P2SH:
+ # Technically, this is just "OP_HASH160 OP_EQUAL" in the
+ # subscript which would be unusual and mostly useless. I'll assume
+ # here that it was an attempt at recursive P2SH, since they are
+ # both the same to our code: unspendable
+ raise InvalidScriptError('Cannot have recursive P2SH scripts!')
+ elif scrType in CPP_TXOUT_STDSINGLESIG:
+ self.numSigsNeeded[-1] = 1
+ self.inScrAddrList[-1] = script_to_scrAddr(script)
+ self.signatures[-1] = ['']
+ elif scrType==CPP_TXOUT_MULTISIG:
+ M, N, a160s, pubs = getMultisigScriptInfo(script)
+ self.inScrAddrList[-1] = [SCRADDR_P2PKH_BYTE+a for a in a160s]
+ self.inPubKeyLists[-1] = pubs[:]
+ self.signatures[-1] = ['']*len(addrs)
+ self.numSigsNeeded[-1] = M
+ else:
+ LOGWARN("Non-standard script for TxIn %d" % i)
+ LOGWARN(binary_to_hex(script))
+ pass
+
+ return self
+
+
+ #############################################################################
+   def createFromTxOutSelection(self, utxoSelection, scriptValuePairs, txMap={}):
+      """
+      This creates a TxDP for a standard transaction from a list of inputs and
+      a list of recipient-value-pairs.
+
+      utxoSelection    - list of utxo objects (must support getTxHash() and
+                         getTxOutIndex()) to spend
+      scriptValuePairs - list of (binScript, value) pairs for the outputs
+      txMap            - optional {txHash: PyTx} map of supporting tx,
+                         forwarded to createFromPyTx
+
+      NOTE: I have modified this so that if the "recip" is not a 20-byte binary
+            string, it is instead interpretted as a SCRIPT -- which could be
+            anything, including a multi-signature transaction
+      """
+
+      # Explicitly reject legacy (hash160, value) pairs: callers must supply
+      # full scripts now
+      for scr,val in scriptValuePairs:
+         if len(scr)==20:
+            raise BadAddressError( tr("""
+               createFromTxOutSelection() has changed to take (script, value)
+               pairs instead of (hash160, value) pairs.  This is because we
+               need this function to be able to send to any arbitrary script,
+               not just pay2pubkeyhash scripts.  Especially for P2SH support.
+               This method will check that it is either reg, P2SH or multisig
+               before continuing.  Modify this function to allow more script
+               types to be handled."""))
+
+      # Sanity check: inputs must cover outputs (difference is the fee)
+      totalUtxoSum = sumTxOutList(utxoSelection)
+      totalOutputSum = sum([a[1] for a in scriptValuePairs])
+      if not totalUtxoSum >= totalOutputSum:
+         raise TxdpError('More outputs than inputs!')
+
+      thePyTx = PyTx()
+      thePyTx.version = 1
+      thePyTx.lockTime = 0
+      thePyTx.inputs = []
+      thePyTx.outputs = []
+
+      # We can prepare the outputs, first
+      for script,value in scriptValuePairs:
+         txout = PyTxOut()
+         txout.value = long(value)
+
+         # Assume recipObj is either a PBA or a string
+         if isinstance(script, PyBtcAddress):
+            LOGERROR("Didn't know any func was still using this conditional")
+            #scrAddr = addrStr_to_scrAddr(scrAddr.getAddrStr())
+            #scrAddr =
+
+         # Only standard script types are accepted as outputs here
+         intType = getTxOutScriptType(script)
+         if intType==CPP_TXOUT_NONSTANDARD:
+            LOGERROR('Only standard script types are valid for this call')
+            LOGERROR('Script: ' + binary_to_hex(script))
+            raise BadAddressError('Invalid script for tx creation')
+
+         txout.binScript = script[:]
+         thePyTx.outputs.append(txout)
+
+      # Prepare the inputs based on the utxo objects
+      for iin,utxo in enumerate(utxoSelection):
+         # First, make sure that we have the previous Tx data available
+         # We can't continue without it, since BIP 0010 will now require
+         # the full tx of outputs being spent
+         txin = PyTxIn()
+         txin.outpoint = PyOutPoint()
+         txin.binScript = ''
+         txin.intSeq = 2**32-1   # max sequence value
+
+         txhash = utxo.getTxHash()
+         txidx  = utxo.getTxOutIndex()
+         txin.outpoint.txHash = str(txhash)
+         txin.outpoint.txOutIndex = txidx
+         thePyTx.inputs.append(txin)
+
+      # createFromPyTx does the real work (fetching prev-tx data, etc.)
+      return self.createFromPyTx(thePyTx, txMap)
+
+
+ #############################################################################
+ # Currently not used, but may be when we finally implement multi-sig (or coinjoin)
+   def appendSignature(self, binSig, txinIndex=None):
+      """
+      Use this to add a signature to the TxDP object in memory.
+
+      binSig    - the raw signature/script data to attach
+      txinIndex - optional hint for which input the sig belongs to; all
+                  inputs are tried anyway (checkAllInputs=True)
+
+      Returns True if the signature validated against some input (and was
+      stored), False otherwise.
+      """
+      idx, pos, scrAddr = self.processSignature(binSig, txinIndex, checkAllInputs=True)
+      if scrAddr:
+         # Non-empty scrAddr means processSignature found a matching input
+         self.signatures[idx].append(binSig)
+         return True
+
+      return False
+
+ #############################################################################
+ def processSignature(self, sigStr, txinIdx, checkAllInputs=False):
+ """
+ For standard transaction types, the signature field is actually the raw
+ script to be plugged into the final transaction that allows it to eval
+ to true -- except for multi-sig transactions. We have to mess with the
+ data a little bit if we want to use the script-processor to verify the
+ signature. Instead, we will use the crypto ops directly.
+
+ The return value is everything we need to know about this signature:
+ -- TxIn-index: if checkAllInputs=True, we need to know which one worked
+ -- Addr-position: for multi-sig tx, we need to know which addr it matches
+ -- Addr160: address to which this signature corresponds
+ """
+
+ if txinIdx==None or txinIdx<0 or txinIdx>=len(self.pytxObj.inputs):
+ pass
+ else:
+ scriptType = self.scriptTypes[txinIdx]
+ txCopy = self.pytxObj.copy()
+
+ if scriptType in CPP_TXOUT_STDSINGLESIG:
+ # For standard Tx types, sigStr is the full script itself (copy it)
+ txCopy.inputs[txinIdx].binScript = str(sigStr)
+ prevOutScript = str(self.txOutScripts[txinIdx])
+ psp = PyScriptProcessor(prevOutScript, txCopy, txinIdx)
+ if psp.verifyTransactionValid():
+ return txinIdx, 0, script_to_scrAddr(prevOutScript)
+ elif scriptType == CPP_TXOUT_MULTISIG:
+ #STUB
+ pass
+ '''
+ # For multi-sig, sigStr is the raw ECDSA sig ... we will have to
+ # manually construct a tx that the script processor can check,
+ # without the other signatures
+ for i in range(len(txCopy.inputs)):
+ if not i==idx:
+ txCopy.inputs[i].binScript = ''
+ else:
+ txCopy.inputs[i].binScript = self.txOutScripts[i]
+
+ hashCode = binary_to_int(sigStr[-1])
+ hashCode4 = int_to_binary(hashcode, widthBytes=4)
+ preHashMsg = txCopy.serialize() + hashCode4
+ if not hashCode==1:
+ raise NotImplementedError, 'Non-standard hashcodes not supported!'
+
+ # Now check all public keys in the multi-sig TxOut script
+ for i,pubkey in enumerate(self.inPubKeyLists):
+ tempAddr = PyBtcAddress().createFromPublicKeyData(pubkey)
+ if tempAddr.verifyDERSignature(preHashMsg, sigStr):
+ return txInIdx, i, hash160(pubkey)
+ '''
+
+ if checkAllInputs:
+ for i in range(len(self.pytxObj.inputs)):
+ idx, pos, scrAddr = self.processSignature(sigStr, i)
+ if idx>0:
+ return idx, pos, scrAddr
+
+ return -1,-1,''
+
+
+ #############################################################################
+   def checkTxHasEnoughSignatures(self, alsoVerify=False):
+      """
+      This method only counts signatures, unless verify==True
+      """
+      # NOTE(review): this region of the patch appears TRUNCATED/CORRUPTED.
+      # The 'if numSigsHave 0:' line below is not valid Python, and the code
+      # that follows belongs to a different method (it reads like the body of
+      # a BIP-0010 unserialize routine, referencing binUnpacker, targetTx,
+      # line, L, dpIdB58 and magic, none of which are defined here).  The
+      # missing span presumably contained the rest of this method plus the
+      # serializeAscii/unserializeAscii methods.  Recover the original hunk
+      # from upstream before merging -- TODO confirm.
+      for i in range(len(self.pytxObj.inputs)):
+         numSigsHave = sum( [(1 if sig else 0) for sig in self.signatures[i]] )
+         if numSigsHave 0:
+            nextTx = PyTx().unserialize(binUnpacker)
+            self.relevantTxMap[nextTx.getHash()] = nextTx
+
+      # Every input's funding tx must be present for value verification
+      for txin in targetTx.inputs:
+         if not self.relevantTxMap.has_key(txin.outpoint.txHash):
+            raise TxdpError, 'Not all inputs can be verified for TxDP. Aborting!'
+
+      self.createFromPyTx( targetTx, self.relevantTxMap )
+      numIn = len(self.pytxObj.inputs)
+
+      # Do some sanity checks
+      if not self.uniqueB58 == dpIdB58:
+         raise UnserializeError, 'TxDP: Actual DPID does not match listed ID'
+      if not MAGIC_BYTES==magic:
+         raise NetworkIDError, 'TxDP is for diff blockchain! (%s)' % \
+                                                      BLOCKCHAINS[magic]
+
+      # At this point, we should have a TxDP constructed, now we need to
+      # simply scan the rest of the serialized structure looking for any
+      # signatures that may be included
+      while not 'END-TRANSACTION' in line:
+         [iin, val] = line.split('_')[2:]
+         iin = int(iin)
+         self.inputValues[iin] = str2coin(val)
+
+         line = nextLine(L)
+         while '_SIG_' in line:
+            addrB58, sz, sigszHex = line.split('_')[2:]
+            sz = int(sz)
+            sigsz = hex_to_int(sigszHex, endIn=BIGENDIAN)
+            hexSig = ''
+            line = nextLine(L)
+            # Accumulate hex until the next sig/input/end marker
+            while (not '_SIG_' in line) and \
+                  (not 'TXINPUT' in line) and \
+                  (not 'END-TRANSACTION' in line):
+               hexSig += line
+               line = nextLine(L)
+            binSig = hex_to_binary(hexSig)
+            idx, sigOrder, scrAddr = self.processSignature(binSig, iin)
+            if idx == -1:
+               LOGWARN('Invalid sig: Input %d, addr=%s' % (iin, addrB58))
+            elif not scrAddr_to_addrStr(scrAddr)== addrB58:
+               LOGERROR('Listed addr does not match computed addr')
+               raise BadAddressError
+            # If we got here, the signature is valid!
+            self.signatures[iin][sigOrder] = binSig
+
+      return self
+
+
+
+ #############################################################################
+   def pprint(self, indent='   '):
+      """ Print a human-readable summary of this proposal to stdout. """
+      tx = self.pytxObj
+      # The DPID is derived from the hash of the unsigned transaction
+      propID = hash256(tx.serialize())
+      print indent+'Distribution Proposal : ', binary_to_base58(propID)[:8]
+      print indent+'Transaction Version   : ', tx.version
+      print indent+'Transaction Lock Time : ', tx.lockTime
+      print indent+'Num Inputs            : ', len(tx.inputs)
+      for i,txin in enumerate(tx.inputs):
+         prevHash  = txin.outpoint.txHash
+         prevIndex = txin.outpoint.txOutIndex
+         #print '   PrevOut: (%s, index=%d)' % (binary_to_hex(prevHash[:8]),prevIndex),
+         print indent*2 + 'Value: %s' % self.inputValues[i]
+         print indent*2 + 'SrcScript: %s' % binary_to_hex(self.txOutScripts[i])
+         # One line per signature slot (empty string if not yet signed)
+         for ns, sig in enumerate(self.signatures[i]):
+            print indent*2 + 'Sig%d = "%s"'%(ns, binary_to_hex(sig))
+      print indent+'Num Outputs           : ', len(tx.outputs)
+      for i,txout in enumerate(tx.outputs):
+         print '   Recipient: %s BTC' % coin2str(txout.value),
+         scrType = getTxOutScriptType(txout.binScript)
+         if scrType in CPP_TXOUT_HAS_ADDRSTR:
+            print script_to_addrStr(txout.binScript)
+         elif scrType == CPP_TXOUT_MULTISIG:
+            M, N, addrs, pubs = getMultisigScriptInfo(txout.binScript)
+            print 'MULTI-SIG-SCRIPT: %d-of-%d' % (M,N)
+            for addr in addrs:
+               print indent*2, hash160_to_addrStr(addr)
+         elif scrType == CPP_TXOUT_NONSTANDARD:
+            print 'Non-standard: ', binary_to_hex(txout.binScript)
+
+
+################################################################################
+# NOTE: This method was actually used to create the Blockchain-reorg unit-
+# test, and hence why coinbase transactions are supported. However,
+# for normal transactions supported by PyBtcEngine, this support is
+# unnecessary.
+#
+# Additionally, this method both creates and signs the tx: however
+# PyBtcEngine employs TxDistProposals which require the construction
+# and signing to be two separate steps. This method is not suited
+# for most of the armoryengine CONOPS.
+#
+# On the other hand, this method DOES work, and there is no reason
+# not to use it if you already have PyBtcAddress-w-PrivKeys avail
+# and have a list of inputs and outputs as described below.
+#
+# This method will take an already-selected set of TxOuts, along with
+# PyBtcAddress objects containing necessary the private keys
+#
+# Src TxOut ~ {PyBtcAddr, PrevTx, PrevTxOutIdx} --OR-- COINBASE = -1
+# Dst TxOut ~ {PyBtcAddr, value}
+#
+# Of course, we usually don't have the private keys of the dst addrs...
+#
+def PyCreateAndSignTx(srcTxOuts, dstAddrsVals):
+   """
+   Create AND sign a transaction in one step (see the NOTE block above for
+   why this is not used in normal armoryengine CONOPS).
+
+   srcTxOuts    - list of [PyBtcAddress, prevPyTx, prevTxOutIdx] triplets,
+                  or a single-element [-1] for a coinbase input
+   dstAddrsVals - list of [PyBtcAddress, value] pairs for the outputs
+
+   Every source address must have its private key available.  Only the
+   SIGHASH_ALL signing mode is implemented.  Returns the signed PyTx.
+   """
+   newTx = PyTx()
+   newTx.version  = 1
+   newTx.lockTime = 0
+   newTx.inputs   = []
+   newTx.outputs  = []
+
+   numInputs  = len(srcTxOuts)
+   numOutputs = len(dstAddrsVals)
+
+   # Convention here: a single input equal to -1 marks a coinbase tx
+   coinbaseTx = False
+   if numInputs==1 and srcTxOuts[0] == -1:
+      coinbaseTx = True
+
+   #############################
+   # Fill in TxOuts first
+   for i in range(numOutputs):
+      txout = PyTxOut()
+      txout.value = dstAddrsVals[i][1]
+      dst = dstAddrsVals[i][0]
+      if(coinbaseTx):
+         # Coinbase outputs here pay to a bare public key (P2PK)
+         txout.binScript = pubkey_to_p2pk_script(dst.binPublicKey65.toBinStr())
+      else:
+         txout.binScript = hash160_to_p2pkhash_script(dst.getAddr160())
+
+      newTx.outputs.append(txout)
+
+   #############################
+   # Create temp TxIns with blank scripts
+   for i in range(numInputs):
+      txin = PyTxIn()
+      txin.outpoint = PyOutPoint()
+      if(coinbaseTx):
+         # Coinbase inputs reference the null outpoint: zero hash, 0xffffffff
+         txin.outpoint.txHash = '\x00'*32
+         txin.outpoint.txOutIndex = binary_to_int('\xff'*4)
+      else:
+         txin.outpoint.txHash = hash256(srcTxOuts[i][1].serialize())
+         txin.outpoint.txOutIndex = srcTxOuts[i][2]
+      txin.binScript = ''
+      txin.intSeq = 2**32-1
+      newTx.inputs.append(txin)
+
+   #############################
+   # Now we apply the ultra-complicated signature procedure
+   # We need a copy of the Tx with all the txin scripts blanked out
+   txCopySerialized = newTx.serialize()
+   for i in range(numInputs):
+      if coinbaseTx:
+         # Coinbase inputs are not signed
+         pass
+      else:
+         txCopy = PyTx().unserialize(txCopySerialized)
+         srcAddr   = srcTxOuts[i][0]
+         txoutIdx  = srcTxOuts[i][2]
+         prevTxOut = srcTxOuts[i][1].outputs[txoutIdx]
+         binToSign = ''
+
+         assert(srcAddr.hasPrivKey())
+
+         # Only implemented one type of hashing:  SIGHASH_ALL
+         hashType  = 1  # SIGHASH_ALL
+         hashCode1 = int_to_binary(1, widthBytes=1)
+         hashCode4 = int_to_binary(1, widthBytes=4)
+
+         # Copy the script of the TxOut we're spending, into the txIn script
+         txCopy.inputs[i].binScript = prevTxOut.binScript
+         preHashMsg = txCopy.serialize() + hashCode4
+
+         # CppBlockUtils::CryptoECDSA modules do the hashing for us
+         ##binToSign = hash256(preHashMsg)
+         ##binToSign = binary_switchEndian(binToSign)
+
+         signature = srcAddr.generateDERSignature(preHashMsg)
+
+         # If we are spending a Coinbase-TxOut, only need sig, no pubkey
+         # Don't forget to tack on the one-byte hashcode and consider it part of sig
+         # (scripts >30 bytes presumably embed the pubkey, i.e. P2PK -- TODO confirm)
+         if len(prevTxOut.binScript) > 30:
+            sigLenInBinary = int_to_binary(len(signature) + 1)
+            newTx.inputs[i].binScript = sigLenInBinary + signature + hashCode1
+         else:
+            pubkey = srcAddr.binPublicKey65.toBinStr()
+            sigLenInBinary    = int_to_binary(len(signature) + 1)
+            pubkeyLenInBinary = int_to_binary(len(pubkey)       )
+            newTx.inputs[i].binScript = sigLenInBinary + signature + hashCode1 + \
+                                        pubkeyLenInBinary + pubkey
+
+   #############################
+   # Finally, our tx is complete!
+   return newTx
+
+#############################################################################
+def getFeeForTx(txHash):
+ if TheBDM.getBDMState()=='BlockchainReady':
+ if not TheBDM.hasTxWithHash(txHash):
+ LOGERROR('Attempted to get fee for tx we don\'t have...? %s', \
+ binary_to_hex(txHash,BIGENDIAN))
+ return 0
+ txref = TheBDM.getTxByHash(txHash)
+ valIn, valOut = 0,0
+ for i in range(txref.getNumTxIn()):
+ valIn += TheBDM.getSentValue(txref.getTxInCopy(i))
+ for i in range(txref.getNumTxOut()):
+ valOut += txref.getTxOutCopy(i).getValue()
+ return valIn - valOut
+
+
+#############################################################################
+def determineSentToSelfAmt(le, wlt):
+   """
+   NOTE: this method works ONLY because we always generate a new address
+         whenever creating a change-output, which means it must have a
+         higher chainIndex than all other addresses.  If you did something
+         creative with this tx, this may not actually work.
+
+   Returns (amountSent, changeOutputIndex).  Returns (0, 0) if the tx
+   cannot be found, and (fullValue, -1) for a single-output tx.
+   """
+   amt = 0
+   if TheBDM.isInitialized() and le.isSentToSelf():
+      txref = TheBDM.getTxByHash(le.getTxHash())
+      if not txref.isInitialized():
+         return (0, 0)
+      if txref.getNumTxOut()==1:
+         # Single output: no change output, the whole value was "sent"
+         return (txref.getTxOutCopy(0).getValue(), -1)
+      maxChainIndex = -5   # sentinel below any real chainIndex -- TODO confirm
+      txOutChangeVal = 0
+      changeIndex = -1
+      valSum = 0
+      for i in range(txref.getNumTxOut()):
+         valSum += txref.getTxOutCopy(i).getValue()
+         addr160 = CheckHash160(txref.getTxOutCopy(i).getScrAddressStr())
+         addr = wlt.getAddrByHash160(addr160)
+         # The output paying the highest (newest) chainIndex is assumed to
+         # be the change output
+         if addr and addr.chainIndex > maxChainIndex:
+            maxChainIndex = addr.chainIndex
+            txOutChangeVal = txref.getTxOutCopy(i).getValue()
+            changeIndex = i
+
+      amt = valSum - txOutChangeVal
+   # NOTE(review): if the branch above is skipped (BDM uninitialized or not
+   # sent-to-self), changeIndex is unbound here and this raises NameError --
+   # callers apparently only invoke this for sent-to-self entries; confirm
+   return (amt, changeIndex)
+
+
+################################################################################
+#def getUnspentTxOutsForAddrList(addr160List, utxoType='Sweep', startBlk=-1, \
+def getUnspentTxOutsForAddr160List(addr160List, utxoType='Sweep', startBlk=-1, \
+                                   abortIfBDMBusy=False):
+   """
+   You have a list of addresses (or just one) and you want to get all the
+   unspent TxOuts for it.  This can either be for computing its balance, or
+   for sweeping the address(es).
+
+   This will return a list of pairs of [addr160, utxoObj]
+   This isn't the most efficient method for producing the pairs
+
+   NOTE(review): despite the statement above, the code below returns the raw
+   utxo list from the C++ wallet, not [addr160, utxoObj] pairs -- confirm
+   against callers.
+
+   NOTE:  At the moment, this only gets STANDARD TxOuts... non-std uses
+          a different BDM call
+
+   This method will return null output if the BDM is currently in the
+   middle of a scan.  You can use waitAsLongAsNecessary=True if you
+   want to wait for the previous scan AND the next scan.  Otherwise,
+   you can check for bal==-1 and then try again later...
+
+   Multi-threading update:
+
+      This one-stop-shop method has to be blocking.  Instead, you might want
+      to register the address and rescan asynchronously, skipping this method
+      entirely:
+
+         cppWlt = Cpp.BtcWallet()
+         cppWlt.addScrAddress_1_(Hash160ToScrAddr(self.getAddr160()))
+         TheBDM.registerScrAddr(Hash160ToScrAddr(self.getAddr160()))
+         TheBDM.rescanBlockchain(wait=False)
+
+         <... do some other stuff ...>
+
+         if TheBDM.getBDMState()=='BlockchainReady':
+            TheBDM.updateWalletsAfterScan(wait=True) # fast after a rescan
+            bal      = cppWlt.getBalance('Spendable')
+            utxoList = cppWlt.getUnspentTxOutList()
+         else:
+            <...come back later...>
+   """
+   # Proceed only if the blockchain is usable (or the caller tolerates a
+   # scan-in-progress)
+   if TheBDM.getBDMState()=='BlockchainReady' or \
+         (TheBDM.isScanning() and not abortIfBDMBusy):
+      # Allow a bare address to be passed instead of a list
+      if not isinstance(addr160List, (list,tuple)):
+         addr160List = [addr160List]
+
+      # Build a throwaway C++ wallet holding just these addresses
+      cppWlt = Cpp.BtcWallet()
+      for addr in addr160List:
+         if isinstance(addr, PyBtcAddress):
+            cppWlt.addScrAddress_1_(Hash160ToScrAddr(addr.getAddr160()))
+         else:
+            cppWlt.addScrAddress_1_(Hash160ToScrAddr(addr))
+
+      TheBDM.registerWallet(cppWlt)
+      currBlk = TheBDM.getTopBlockHeight()
+      TheBDM.scanBlockchainForTx(cppWlt, currBlk+1 if startBlk==-1 else startBlk)
+      #TheBDM.scanRegisteredTxForWallet(cppWlt, currBlk+1 if startBlk==-1 else startBlk)
+
+      if utxoType.lower() in ('sweep','unspent','full','all','ultimate'):
+         # 'Sweep'-style request: the full txout list
+         return cppWlt.getFullTxOutList(currBlk)
+      elif utxoType.lower() in ('spend','spendable','confirmed'):
+         return cppWlt.getSpendableTxOutList(currBlk, IGNOREZC)
+      else:
+         raise TypeError, 'Unknown utxoType!'
+   else:
+      return []
+
+def pprintLedgerEntry(le, indent=''):
+ if len(le.getScrAddr())==21:
+ hash160 = CheckHash160(le.getScrAddr())
+ addrStr = hash160_to_addrStr(hash160)[:12]
+ else:
+ addrStr = ''
+
+ leVal = coin2str(le.getValue(), maxZeros=1)
+ txType = ''
+ if le.isSentToSelf():
+ txType = 'ToSelf'
+ else:
+ txType = 'Recv' if le.getValue()>0 else 'Sent'
+
+ blkStr = str(le.getBlockNum())
+ print indent + 'LE %s %s %s %s' % \
+ (addrStr.ljust(15), leVal, txType.ljust(8), blkStr.ljust(8))
+
+# Putting this at the end because of the circular dependency
+from armoryengine.BDM import TheBDM
+from armoryengine.PyBtcAddress import PyBtcAddress
+from armoryengine.CoinSelection import pprintUnspentTxOutList, sumTxOutList
+from armoryengine.Script import *
diff --git a/dialogs/__init__.py b/armoryengine/__init__.py
similarity index 100%
rename from dialogs/__init__.py
rename to armoryengine/__init__.py
diff --git a/armoryengine/parseAnnounce.py b/armoryengine/parseAnnounce.py
new file mode 100644
index 000000000..5f01967c7
--- /dev/null
+++ b/armoryengine/parseAnnounce.py
@@ -0,0 +1,402 @@
+################################################################################
+# #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
+# Distributed under the GNU Affero General Public License (AGPL v3) #
+# See LICENSE or http://www.gnu.org/licenses/agpl.html #
+# #
+################################################################################
+from ArmoryUtils import *
+import os
+from jasvet import readSigBlock
+from copy import deepcopy
+
+
+################################################################################
+# NOTE: These methods DO NOT verify signatures. It is assumed that the
+# signature verification already happened, and these methods were only
+# called if the signatures were good. They can be called on blocks
+# of text WITH OR WITHOUT the signature block data (either pass it
+# the signed block, or just whats inside the block).
+
+# ASCII-armor delimiters for signed announce data: HEAD marks the start of
+# the signed payload, TAIL marks the start of the signature block
+SIGNED_BLOCK_HEAD = '-----BEGIN BITCOIN SIGNED MESSAGE-----'
+SIGNED_BLOCK_TAIL = '-----BEGIN BITCOIN SIGNATURE-----'
+
+################################################################################
+################################################################################
+class changelogParser(object):
+ """
+ Returns a list of list of lists representing all version & changelg to the
+ stop version (or all versions, if 0)
+
+
+ changeLog ==
+ [
+ [ '0.88.1', 'December 27, 2013',
+ [
+ [ 'Auto-run Bitcoind', 'This version will now run ...'],
+ [ 'Mac/OSX Version', 'Binaries now available on ...'],
+ [ 'Signed installers', 'All binaries are now sign ...']
+ ]
+ ]
+ [ '0.87', 'April 18, 2013',
+ [
+ [ 'QR Codes', 'QR codes added everywhere ...'],
+ [ 'Export History', 'Export tx history to CSV ...']
+ ]
+ ]
+ ...
+ ]
+
+ """
+
+
+ #############################################################################
+ def __init__(self, filename='', filetext=''):
+ self.changelog = []
+ if not filename and not filetext:
+ return
+
+ if filename and os.path.exists(filename):
+ f = open(filename, 'r')
+ filetext = f.read()
+ f.close()
+
+ self.parseChangelogText(filetext)
+
+
+
+ #############################################################################
+ def parseChangelogFile(self, filename):
+ if not os.path.exists(filename):
+ LOGERROR('File does not exist: %s', filename)
+
+ f = open(filename,'r')
+ verdata = f.read()
+ f.close()
+
+ return self.parseVersionsText(verdata)
+
+
+
+ #############################################################################
+ def parseChangelogText(self, fileText):
+
+ self.changelog = []
+
+ if fileText is None:
+ return None
+
+
+ try:
+ if SIGNED_BLOCK_HEAD in fileText:
+ fileText = readSigBlock(fileText)[1]
+
+ versionLines = [line.strip() for line in fileText.split('\n')][::-1]
+
+
+ if len(versionLines)==0:
+ return None
+
+ # All lines have been stripped already
+ while len(versionLines) > 0:
+ line = versionLines.pop()
+
+ if line.startswith('#') or len(line)==0:
+ continue
+
+
+ if line.startswith('VERSION') and len(line.split())==2:
+ self.changelog.append([line.split(' ')[-1], '', []])
+ elif line.upper().startswith('RELEASED'):
+ self.changelog[-1][1] = line[8:].strip()
+ elif line.startswith('-'):
+ featureTitle = line[2:]
+ self.changelog[-1][2].append([featureTitle, ''])
+ else:
+ curr = self.changelog[-1][2][-1][-1]
+ self.changelog[-1][2][-1][-1] += ('' if len(curr)==0 else ' ') + line
+
+ return self.getChangelog()
+ except:
+ LOGEXCEPT('Failed to parse changelog')
+ return None
+
+
+ #############################################################################
+ def getChangelog(self, stopAtVersion=0, dontStartBefore=UINT32_MAX):
+ output = []
+ for ver in self.changelog:
+ verInt = getVersionInt(readVersionString(ver[0]))
+
+ if verInt > dontStartBefore:
+ continue
+
+ if verInt <= stopAtVersion:
+ break
+
+ output.append(ver[:])
+
+ return output
+
+
+
+
+
+
+################################################################################
+################################################################################
+class downloadLinkParser(object):
+ """
+ Parse files with the following format:
+
+ -----BEGIN BITCOIN SIGNED MESSAGE-----
+ # Armory for Windows
+ Armory 0.91 Windows XP 32 http://url/armory_0.91_xp32.exe 3afb9881c32
+ Armory 0.91 Windows XP 64 http://url/armory_0.91_xp64.exe 8993ab127cf
+ Armory 0.91 Windows Vista,7,8 32,64 http://url/armory_0.91.exe 7f3b9964aa3
+
+ # Offline Bundles
+ ArmoryOffline 0.88 Ubuntu 10.04 32 http://url/offbundle-32.tar.gz 641382c93b9
+ ArmoryOffline 0.88 Ubuntu 12.10 32 http://url/offbundle-64.tar.gz 5541af39c84
+
+ # Windows 32-bit Satoshi (Bitcoin-Qt/bitcoind)
+ Satoshi 0.9.0 Windows XP,Vista,7,8 32,64 http://btc.org/win0.9.0.exe 118372a9ff3
+ Satoshi 0.9.0 Ubuntu 10.04 http://btc.org/win0.9.0.deb 2aa3f763c3b
+
+ -----BEGIN BITCOIN SIGNATURE-----
+ ac389861cff8a989ae57ae67af43cb3716ca189aa178cff893179531
+ -----END BITCOIN SIGNATURE-----
+
+
+ This will return a heavily-nested dictionary that will be easy to look up
+ after we have reduced the current OS to the right set of keys (we will
+ create a function that takes the output of
+ platform.system(),
+ platform.mac_ver(),
+ platform.linux_distribution(), and
+ platform.win32_ver()
+ and returns a sequence of keys we can use to look up the correct version
+
+ self.downloadMap['Armory']['0.91']['Windows']['Vista']['64'] -->
+ ['http://url/armory_0.91.exe', '7f3b9964aa3']
+
+ Actually use "getDownloadLink
+
+ """
+
+ #############################################################################
+ def __init__(self, filename='', filetext=''):
+ self.downloadMap = {}
+ if not filename and not filetext:
+ return
+
+ if filename and os.path.exists(filename):
+ f = open(filename, 'r')
+ self.parseDownloadList(f.read())
+ f.close()
+ elif filetext:
+ self.parseDownloadList(filetext)
+
+
+
+ #############################################################################
+ def parseDownloadList(self, fileText):
+ self.downloadMap = {}
+
+ if fileText is None:
+ return {}
+
+ def insertLink(mapObj, urlAndHash, keyList):
+ if len(keyList)>1:
+ if not keyList[0] in mapObj:
+ mapObj[keyList[0]] = {}
+ insertLink(mapObj[keyList[0]], urlAndHash, keyList[1:])
+ else:
+ mapObj[keyList[0]] = urlAndHash
+
+
+ try:
+ if SIGNED_BLOCK_HEAD in fileText:
+ fileText = readSigBlock(fileText)[1]
+
+
+ dlLines = [line.strip() for line in fileText.split('\n')][::-1]
+
+ while len(dlLines) > 0:
+
+ line = dlLines.pop()
+
+ if line.startswith('#') or len(line)==0:
+ continue
+
+ lineLists = [pc.split(',') for pc in line.split()[:-2]]
+ urlAndHash = line.split()[-2:]
+
+ APPLIST, VERLIST, OSLIST, SUBOSLIST, BITLIST = range(5)
+
+ for app in lineLists[APPLIST]:
+ for ver in lineLists[VERLIST]:
+ for opsys in lineLists[OSLIST]:
+ for subOS in lineLists[SUBOSLIST]:
+ for nbit in lineLists[BITLIST]:
+ insertLink(self.downloadMap,
+ urlAndHash,
+ [app, ver, opsys, subOS, nbit])
+
+
+ return self.getNestedDownloadMap()
+ except:
+ LOGEXCEPT('Failed to parse downloads')
+ return None
+
+
+ #############################################################################
+ def printDownloadMap(self):
+
+ def recursePrint(theObj, indent=0):
+ if not isinstance(theObj, dict):
+ print ' '*indent + str(theObj)
+ else:
+ for key,val in theObj.iteritems():
+ print ' '*indent + key + ':'
+ recursePrint(theObj[key], indent+5)
+
+ recursePrint(self.downloadMap)
+
+ #############################################################################
+ def getDownloadLink(self, *keyList):
+
+ def recurseGet(theMap, keyList):
+ if len(keyList)==0:
+ return None
+
+ if not isinstance(theMap, dict):
+ return None
+
+ if len(keyList)>1:
+ if not keyList[0] in theMap:
+ return None
+ return recurseGet(theMap[keyList[0]], keyList[1:])
+ else:
+ return theMap[keyList[0]]
+
+ if len(keyList)==0:
+ return None
+
+ return recurseGet(self.downloadMap, keyList)
+
+
+ #############################################################################
+ def getNestedDownloadMap(self):
+ return deepcopy(self.downloadMap)
+
+
+
+################################################################################
+################################################################################
+class notificationParser(object):
+ """
+ # PRIORITY VALUES:
+ # Test announce: 1024
+   # General Announcement:   2048
+ # Important non-critical: 3072
+ # Critical/security sens: 4096
+ #
+ # Unique ID must be first, and signals that this is a new notification
+
+ UNIQUEID: 873fbc11
+ VERSION: 0
+ STARTTIME: 0
+ EXPIRES: 1500111222
+ CANCELID: []
+ MINVERSION: 0.87.2
+ MAXVERSION: 0.88.1
+ PRIORITY: 4096
+ NOTIFYSEND: False
+ NOTIFYRECV: True
+ SHORTDESCR: Until further notice, require 30 confirmations for incoming transactions.
+ LONGDESCR:
+ THIS IS A FAKE ALERT FOR TESTING PURPOSES:
+
+ There is some turbulence on the network that may result in some transactions
+ being accidentally reversed up to 30 confirmations. A clever attacker may
+ be able to exploit this to scam you. For incoming transactions from
+ parties with no existing trust, please wait at least 30 confirmations before
+ considering the coins to be yours.
+ *****
+ """
+
+ #############################################################################
+ def __init__(self, filename='', filetext=''):
+ self.notifications = {}
+ if not filename and not filetext:
+ return
+
+ if filename and os.path.exists(filename):
+ f = open(filename, 'r')
+ filetext = f.read()
+ f.close()
+
+ self.parseNotificationText(filetext)
+
+
+
+
+ #############################################################################
+ def parseNotificationText(self, fileText):
+ self.notifications = {}
+
+ if fileText is None:
+ return None
+
+
+ try:
+ if SIGNED_BLOCK_HEAD in fileText:
+ fileText = readSigBlock(fileText)[1]
+
+ notifyLines = [line.strip() for line in fileText.split('\n')][::-1]
+
+
+ currID = ''
+ readLongDescr = False
+ longDescrAccum = ''
+
+ while len(notifyLines) > 0:
+
+ line = notifyLines.pop()
+
+ if not readLongDescr and (line.startswith('#') or len(line)==0):
+ continue
+
+ if line.upper().startswith('UNIQUEID'):
+ currID = line.split(':')[-1].strip()
+ self.notifications[currID] = {}
+ elif line.upper().startswith('LONGDESCR'):
+ readLongDescr = True
+ elif line.startswith("*****"):
+ readLongDescr = False
+ self.notifications[currID]['LONGDESCR'] = longDescrAccum
+ longDescrAccum = ''
+ elif readLongDescr:
+ if len(line.strip())==0:
+                  longDescrAccum += '\n'
+ else:
+ longDescrAccum += line.strip() + ' '
+ else:
+ key = line.split(':')[ 0].strip().upper()
+ val = line.split(':')[-1].strip()
+ self.notifications[currID][key] = val
+
+ return self.getNotificationMap()
+ except:
+ LOGEXCEPT('Failed to parse notifications')
+ return None
+
+
+ #############################################################################
+ def getNotificationMap(self):
+ return deepcopy(self.notifications)
+
+
+
+# kate: indent-width 3; replace-tabs on;
diff --git a/armoryengine/torrentDL.py b/armoryengine/torrentDL.py
new file mode 100644
index 000000000..2093f0c10
--- /dev/null
+++ b/armoryengine/torrentDL.py
@@ -0,0 +1,527 @@
+import sys
+import os
+
+sys.path.append('..')
+
+from ArmoryUtils import ARMORY_HOME_DIR, BTC_HOME_DIR, LOGEXCEPT, \
+ LOGERROR, LOGWARN, LOGINFO, MEGABYTE, \
+ AllowAsync, RightNow, unixTimeToFormatStr, \
+ secondsToHumanTime, MAGIC_BYTES,\
+ bytesToHumanSize, secondsToHumanTime
+from BitTornado.download_bt1 import BT1Download, defaults, get_response
+from BitTornado.RawServer import RawServer, UPnP_ERROR
+from random import seed
+from socket import error as socketerror
+from BitTornado.bencode import bencode
+from BitTornado.natpunch import UPnP_test
+from threading import Event
+from os.path import abspath
+from sys import argv, stdout
+import sys
+import shutil
+from sha import sha
+from time import strftime, sleep
+import types
+from BitTornado.clock import clock
+from BitTornado import createPeerID, version
+from BitTornado.ConfigDir import ConfigDir
+from BitTornado.download_bt1 import defaults, download
+from BitTornado.ConfigDir import ConfigDir
+
+
+# Totally should've used a decorator for the custom funcs...
+
+
+class TorrentDownloadManager(object):
+
+ #############################################################################
+ def __init__(self, torrentFile=None, savePath=None, doDisable=False):
+ self.torrent = torrentFile
+ self.torrentDNE = False
+ self.cacheDir = os.path.join(ARMORY_HOME_DIR, 'bittorrentcache')
+ self.doneObj = Event()
+ self.customCallbacks = {}
+ self.minSecondsBetweenUpdates = 1
+ self.lastUpdate = 0
+ self.disabled = doDisable
+ self.satoshiDir = BTC_HOME_DIR
+
+ # These need to exist even if setup hasn't been called
+ self.lastStats = {}
+ self.startTime = None
+ self.finishTime = None
+ self.dlFailed = False
+ self.bt1dow = None
+ self.response = None
+ self.torrentSize = None
+ self.torrentName = None
+ self.savePath = None
+ self.savePath_temp = None
+
+
+
+
+ #############################################################################
+ def setupTorrent(self, torrentFile, savePath=None):
+
+ # Some things to reset on every setup operation
+ self.lastStats = {}
+ self.startTime = None
+ self.finishTime = None
+ self.dlFailed = False
+ self.bt1dow = None
+ self.response = None
+ self.torrentSize = None
+ self.torrentName = None
+ self.savePath = None
+ self.savePath_temp = None
+
+ # Set torrent file, bail if it doesn't exist
+ self.torrent = torrentFile
+ self.torrentDNE = False
+
+ if not self.torrent or not os.path.exists(self.torrent):
+ LOGERROR('Attempted to setup TDM with non-existent torrent:')
+ if self.torrent:
+ LOGERROR('Torrent path: %s', self.torrent)
+ self.torrentDNE = True
+ return
+
+ self.lastUpdate = RightNow()
+
+ # Get some info about the torrent
+ if not self.torrentDNE:
+ self.response = get_response(self.torrent, '', self.errorFunc)
+ self.torrentSize = self.response['info']['length']
+ self.torrentName = self.response['info']['name']
+ LOGINFO('Torrent name is: %s' % self.torrentName)
+ LOGINFO('Torrent size is: %0.2f MB' % (self.torrentSize/float(MEGABYTE)))
+
+
+ self.savePath = savePath
+ if self.savePath is None:
+ self.savePath = os.path.join(BTC_HOME_DIR, self.torrentName)
+ self.savePath_temp = self.savePath + '.partial'
+
+ #############################################################################
+ def setSatoshiDir(self, btcDir):
+ self.satoshiDir = btcDir
+
+ #############################################################################
+ def isInitialized(self):
+ return (self.torrent is not None)
+
+ #############################################################################
+ def torrentIsMissing(self):
+ return self.torrentDNE
+
+ #############################################################################
+ def fileProgress(self):
+ """
+ Either the mainsize is the same as the torrent (because it finished and
+      was renamed), or the .partial file is the current state of the DL, and
+ we report its size
+ """
+
+ mainsize = 0
+ if os.path.exists(self.savePath):
+ mainsize = os.path.getsize(self.savePath)
+
+ tempsize = 0
+ if os.path.exists(self.savePath_temp):
+ tempsize = os.path.getsize(self.savePath_temp)
+
+
+ if tempsize > 0:
+ return (tempsize, self.torrentSize)
+ elif mainsize > 0:
+ if not mainsize == self.torrentSize:
+ LOGERROR('Torrent %s is not the correct size...?', self.torrentName)
+ return (0,0)
+ else:
+ return (mainsize, mainsize)
+
+ return (0, self.torrentSize)
+
+
+
+ #############################################################################
+ def hasCustomFunc(self, funcName):
+ if not funcName in self.customCallbacks:
+ return False
+
+ return isinstance(self.customCallbacks[funcName], types.FunctionType)
+
+
+ #############################################################################
+ def setCallback(self, name, func):
+ if func is None:
+ if name in self.customCallbacks:
+ del self.customCallbacks[name]
+ return
+
+ self.customCallbacks[name] = func
+
+ #############################################################################
+ def setSecondsBetweenUpdates(self, newSec):
+ self.minSecondsBetweenUpdates = newSec
+
+ #############################################################################
+ def isDone(self):
+ return self.doneObj.isSet()
+
+
+ #############################################################################
+ def displayFunc(self, dpflag=Event(),
+ fractionDone=None,
+ timeEst=None,
+ downRate=None,
+ upRate=None,
+ activity=None,
+ statistics=None,
+ **kws):
+
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('displayFunc'):
+ self.customCallbacks['displayFunc'](dpflag, fractionDone, timeEst, \
+ downRate, upRate, activity, \
+ statistics, **kws)
+ return
+
+
+ pr = ''
+ pr += ('Done: %0.1f%%' % (fractionDone*100)) if fractionDone else ''
+ pr += (' (%0.1f kB/s' % (downRate/1024.)) if downRate else ' ('
+ pr += (' from %d seeds' % statistics.numSeeds) if statistics else ''
+ pr += (' and %d peers' % statistics.numPeers) if statistics else ''
+ if timeEst:
+ pr += '; Approx %s remaining' % secondsToHumanTime(timeEst)
+ pr += ')'
+ LOGINFO(pr)
+
+
+
+ #############################################################################
+ def statusFunc(self, dpflag=Event(),
+ fractionDone=None,
+ timeEst=None,
+ downRate=None,
+ upRate=None,
+ activity=None,
+ statistics=None,
+ **kws):
+
+ # Want to be able to query a few things between status calls
+ self.lastStats['fracDone'] = fractionDone
+ self.lastStats['timeEst'] = timeEst
+ self.lastStats['downRate'] = downRate
+ self.lastStats['upRate'] = upRate
+ self.lastStats['activity'] = activity
+ self.lastStats['numSeeds'] = statistics.numSeeds if statistics else None
+ self.lastStats['numPeers'] = statistics.numPeers if statistics else None
+ self.lastStats['downTotal']= statistics.downTotal if statistics else None
+ self.lastStats['upTotal'] = statistics.upTotal if statistics else None
+
+ try:
+ if (RightNow() - self.lastUpdate) < self.minSecondsBetweenUpdates:
+ return
+
+ self.lastUpdate = RightNow()
+
+ self.displayFunc(dpflag, fractionDone, timeEst, downRate, upRate,
+ activity, statistics, **kws)
+
+ finally:
+ # Set this flag to let the caller know it's ready for the next update
+ dpflag.set()
+
+
+ #############################################################################
+ def getLastStats(self, name):
+ return self.lastStats.get(name)
+
+ #############################################################################
+ def isStarted(self):
+ return (self.startTime is not None)
+
+ #############################################################################
+ def isFailed(self):
+ return self.dlFailed
+
+ #############################################################################
+ def isFinished(self):
+ return (self.finishTime is not None) or self.dlFailed
+
+ #############################################################################
+ def isRunning(self):
+ return self.isStarted() and not self.isFinished()
+
+ #############################################################################
+ def finishedFunc(self):
+ """
+      This function must rename the ".partial" file to the correct name
+ """
+ self.finishTime = RightNow()
+ LOGINFO('Download finished!')
+
+ LOGINFO("Moving file")
+ LOGINFO(" From: %s", self.savePath_temp)
+ LOGINFO(" To: %s", self.savePath)
+ shutil.move(self.savePath_temp, self.savePath)
+
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('finishedFunc'):
+ self.customCallbacks['finishedFunc']()
+
+ if self.bt1dow:
+ self.bt1dow.shutdown()
+
+
+
+ #############################################################################
+ def failedFunc(self, msg=''):
+ self.dlFailed = True
+ LOGEXCEPT('Download failed! %s', msg)
+
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('failedFunc'):
+ self.customCallbacks['failedFunc'](msg)
+ return
+
+ if self.bt1dow:
+ self.bt1dow.shutdown()
+
+
+
+ #############################################################################
+ def errorFunc(self, errMsg):
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('errorFunc'):
+ self.customCallbacks['errorFunc'](errMsg)
+ return
+
+ LOGEXCEPT(errMsg)
+
+ #############################################################################
+ def excFunc(self, errMsg):
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('excFunc'):
+ self.customCallbacks['excFunc'](errMsg)
+ return
+
+ LOGEXCEPT(errMsg)
+
+ #############################################################################
+ def chooseFileFunc(self, default, fsize, saveas, thedir):
+ # Use caller-set function if it exists
+ if self.hasCustomFunc('chooseFileFunc'):
+ self.customCallbacks['chooseFileFunc'](default, fsize, saveas, thedir)
+ return
+
+ return (default if saveas is None else saveas)
+
+
+ #############################################################################
+ def getTDMState(self):
+ if self.disabled:
+ return 'Disabled'
+
+ if not self.isInitialized():
+ return 'Uninitialized'
+
+ if self.torrentDNE:
+ return 'TorrentDNE'
+
+ if not self.isStarted():
+ return 'ReadyToStart'
+
+ if self.dlFailed:
+ return 'DownloadFailed'
+
+ if self.isFinished():
+ return 'DownloadFinished'
+
+ return 'Downloading'
+
+ #############################################################################
+ def startDownload(self):
+ return self.doTheDownloadThing(async=True)
+
+ #############################################################################
+ @AllowAsync
+ def doTheDownloadThing(self):
+ """
+ This was copied and modified directly from btdownloadheadless.py
+ """
+
+ if self.disabled:
+ LOGERROR('Attempted to start DL but DISABLE_TORRENT is True')
+ return
+
+ while 1:
+
+ # Use this var to identify if we've started downloading
+ self.startTime = RightNow()
+
+
+ configdir = ConfigDir(self.cacheDir)
+ defaultsToIgnore = ['responsefile', 'url', 'priority']
+ configdir.setDefaults(defaults, defaultsToIgnore)
+ config = configdir.loadConfig()
+ config['responsefile'] = self.torrent
+ config['url'] = ''
+ config['priority'] = ''
+ config['saveas'] = self.savePath_temp
+ config['save_options'] = 0
+ config['max_uploads'] = 0
+ config['max_files_open'] = 25
+
+ configdir.deleteOldCacheData(config['expire_cache_data'])
+
+ myid = createPeerID()
+ seed(myid)
+
+ rawserver = RawServer( self.doneObj,
+ config['timeout_check_interval'],
+ config['timeout'],
+ ipv6_enable = config['ipv6_enabled'],
+ failfunc = self.failedFunc,
+ errorfunc = self.errorFunc)
+
+ upnp_type = UPnP_test(config['upnp_nat_access'])
+
+ while True:
+ try:
+ listen_port = rawserver.find_and_bind( \
+ config['minport'],
+ config['maxport'],
+ config['bind'],
+ ipv6_socket_style = config['ipv6_binds_v4'],
+ upnp = upnp_type,
+ randomizer = config['random_port'])
+ break
+ except socketerror, e:
+ if upnp_type and e == UPnP_ERROR:
+ LOGWARN('WARNING: COULD NOT FORWARD VIA UPnP')
+ upnp_type = 0
+ continue
+ LOGERROR("error: Couldn't listen - " + str(e))
+ self.failedFunc()
+ return
+
+ if not self.response:
+ break
+
+ infohash = sha(bencode(self.response['info'])).digest()
+
+ LOGINFO('Downloading: %s', self.torrentName)
+ curr,tot = [float(a)/MEGABYTE for a in self.fileProgress()]
+ if curr == 0:
+ LOGINFO('Starting new download')
+ elif curr==tot:
+ LOGINFO('Torrent already finished!')
+ return
+ else:
+ LOGINFO('Picking up where left off at %0.0f of %0.0f MB' % (curr,tot))
+
+ self.bt1dow = BT1Download( self.statusFunc,
+ self.finishedFunc,
+ self.errorFunc,
+ self.excFunc,
+ self.doneObj,
+ config,
+ self.response,
+ infohash,
+ myid,
+ rawserver,
+ listen_port,
+ configdir)
+
+ if not self.bt1dow.saveAs(self.chooseFileFunc):
+ break
+
+ if not self.bt1dow.initFiles(old_style = True):
+ break
+
+ if not self.bt1dow.startEngine():
+ self.bt1dow.shutdown()
+ break
+
+
+ self.bt1dow.startRerequester()
+ self.bt1dow.autoStats()
+
+ if not self.bt1dow.am_I_finished():
+ self.statusFunc(activity = 'Connecting to peers')
+
+ rawserver.listen_forever(self.bt1dow.getPortHandler())
+ self.statusFunc(activity = 'Shutting down')
+ self.bt1dow.shutdown()
+ break
+
+ try:
+ rawserver.shutdown()
+ except:
+ pass
+
+ if not self.isDone():
+ self.failedFunc()
+
+
+# Run this file to test with your target torrent. Also shows an example
+# of overriding methods with other custom methods. Just about
+# any of the methods of TorrentDownloadManager can be replaced like this
+if __name__=="__main__":
+ tdm = TorrentDownloadManager()
+ tdm.setupTorrent(argv[1], argv[2])
+
+ # Replace full-featured LOGINFOs with simple print message
+ def simplePrint( dpflag=Event(),
+ fractionDone=None,
+ timeEst=None,
+ downRate=None,
+ upRate=None,
+ activity=None,
+ statistics=None,
+ **kws):
+
+ if fractionDone:
+ print 'TorrentThread: %0.1f%% done;' % (fractionDone*100),
+
+ if timeEst:
+ print ', about %s remaining' % secondsToHumanTime(timeEst),
+
+ if activity:
+ print ' (%s)'%activity
+ else:
+ print ''
+
+ sys.stdout.flush()
+
+ # Finish funct will still move file.partial to file, this is everything else
+ def notifyFinished():
+ print 'TorrentThread: Finished downloading at %s' % unixTimeToFormatStr(RightNow())
+ sys.stdout.flush()
+
+
+ tdm.setCallback('displayFunc', simplePrint)
+ tdm.setCallback('finishedFunc', notifyFinished)
+ tdm.setSecondsBetweenUpdates(1)
+
+ thr = tdm.startDownload(async=True)
+
+ # The above call was asynchronous
+ while not thr.isFinished():
+ print 'MainThread: Still downloading;',
+ if tdm.getLastStats('downRate'):
+ print ' Last dl speed: %0.1f kB/s' % (tdm.getLastStats('downRate')/1024.)
+ else:
+ print ''
+ sys.stdout.flush()
+ sleep(10)
+
+
+ print 'Finished downloading! Exiting...'
+
+
+
diff --git a/armorymodels.py b/armorymodels.py
index 9e2cca8d1..df82a7d97 100644
--- a/armorymodels.py
+++ b/armorymodels.py
@@ -1,23 +1,26 @@
################################################################################
# #
-# Copyright (C) 2011-2013, Armory Technologies, Inc. #
+# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
+from os import path
import os
import platform
import sys
-from os import path
+
from PyQt4.QtCore import *
from PyQt4.QtGui import *
-sys.path.append('..')
-sys.path.append('../cppForSwig')
-from armoryengine import *
+
from CppBlockUtils import *
+from armoryengine.ALL import *
from qtdefines import *
-from armorycolors import Colors, htmlColor
+
+
+sys.path.append('..')
+sys.path.append('../cppForSwig')
@@ -178,7 +181,7 @@ def data(self, index, role=Qt.DisplayRole):
#if self.index(index.row(),COL.DoubleSpend).data().toBool():
if rowData[COL.DoubleSpend]:
return QVariant(Colors.TextRed)
- if nConf <= 2:
+ if nConf < 2:
return QVariant(Colors.TextNoConfirm)
elif nConf <= 4:
return QVariant(Colors.TextSomeConfirm)
@@ -420,6 +423,7 @@ def filterAddrList(self):
self.addr160List = [a.getAddr160() for a in addrList]
+ @TimeThisFunction
def reset(self):
self.filterAddrList()
super(WalletAddrDispModel, self).reset()
@@ -592,18 +596,19 @@ def __init__(self, pytx, txinListFromBDM=None, main=None):
scrType = getTxInScriptType(txin)
if txinListFromBDM and len(txinListFromBDM[i][0])>0:
# We had a BDM to help us get info on each input -- use it
- recip160,val,blk,hsh,idx = txinListFromBDM[i]
+ scrAddr,val,blk,hsh,idx = txinListFromBDM[i]
+ addrStr = scrAddr_to_addrStr(scrAddr)
if main:
- wltID = self.main.getWalletForAddr160(recip160)
+ wltID = self.main.getWalletForAddr160(scrAddr[1:])
dispcoin = '' if not val else coin2str(val,maxZeros=1)
self.dispTable[-1].append(wltID)
- self.dispTable[-1].append(hash160_to_addrStr(recip160))
+ self.dispTable[-1].append(addrStr)
self.dispTable[-1].append(dispcoin)
self.dispTable[-1].append(binary_to_hex(hsh))
self.dispTable[-1].append(idx)
self.dispTable[-1].append(blk)
if pytxdp==None:
- self.dispTable[-1].append(TXIN_TYPE_NAMES[scrType])
+ self.dispTable[-1].append(CPP_TXIN_SCRIPT_NAMES[scrType])
else:
# TODO: Assume NO multi-sig... will be updated in future to use
# PyTxDP::isSigValidForInput which will handle all cases
@@ -615,17 +620,19 @@ def __init__(self, pytx, txinListFromBDM=None, main=None):
# We don't have any info from the BDM, display whatever we can
# (which usually isn't much)
recipAddr = ''
- if scrType in (TXIN_SCRIPT_STANDARD,):
- recipAddr = TxInScriptExtractAddr160IfAvail(txin)
- if main:
- wltID = self.main.getWalletForAddr160(recip)
+ recipAddr = TxInExtractAddrStrIfAvail(txin)
+ atype, a160 = '',''
+ if len(recipAddr) > 0:
+ atype, a160 = addrStr_to_hash160(recipAddr)
+ wltID = self.main.getWalletForAddr160(a160)
+
self.dispTable[-1].append(wltID)
- self.dispTable[-1].append(recipAddr)
+ self.dispTable[-1].append(a160)
self.dispTable[-1].append('')
self.dispTable[-1].append(binary_to_hex(txin.outpoint.txHash))
self.dispTable[-1].append(str(txin.outpoint.txOutIndex))
self.dispTable[-1].append('')
- self.dispTable[-1].append(TXIN_TYPE_NAMES[scrType])
+ self.dispTable[-1].append(CPP_TXIN_SCRIPT_NAMES[scrType])
self.dispTable[-1].append(int_to_hex(txin.intSeq, widthBytes=4))
self.dispTable[-1].append(binary_to_hex(txin.binScript))
@@ -702,9 +709,9 @@ def __init__(self, pytx, main=None, idxGray=[]):
self.main = main
self.txOutList = []
self.wltIDList = []
- self.idxGray = idxGray
+ self.idxGray = idxGray[:]
for i,txout in enumerate(self.tx.outputs):
- recip160 = TxOutScriptExtractAddr160(txout.binScript)
+ recip160 = script_to_scrAddr(txout.binScript)[1:]
self.txOutList.append(txout)
if main:
self.wltIDList.append(main.getWalletForAddr160(recip160))
@@ -722,31 +729,24 @@ def data(self, index, role=Qt.DisplayRole):
COLS = TXOUTCOLS
row,col = index.row(), index.column()
txout = self.txOutList[row]
- stype = getTxOutScriptType(txout.binScript)
- stypeStr = TXOUT_TYPE_NAMES[stype]
+ stype = BtcUtils().getTxOutScriptTypeInt(txout.binScript)
+ stypeStr = CPP_TXOUT_SCRIPT_NAMES[stype]
wltID = self.wltIDList[row]
- if stype==TXOUT_SCRIPT_MULTISIG:
- mstype = getTxOutMultiSigInfo(txout.binScript)[0]
- stypeStr = 'Multi-Signature (%d-of-%d)' % mstype
+ if stype==CPP_TXOUT_MULTISIG:
+ M,N = getMultisigScriptInfo(txout.binScript)[:2]
+ stypeStr = 'MultiSig(%d-of-%d)' % (M,N)
if role==Qt.DisplayRole:
if col==COLS.WltID: return QVariant(wltID)
if col==COLS.ScrType: return QVariant(stypeStr)
if col==COLS.Script: return QVariant(binary_to_hex(txout.binScript))
- if stype==TXOUT_SCRIPT_STANDARD:
- if col==COLS.Recip: return QVariant(TxOutScriptExtractAddrStr(txout.binScript))
- if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
- if stype==TXOUT_SCRIPT_COINBASE:
- if col==COLS.Recip: return QVariant(TxOutScriptExtractAddrStr(txout.binScript))
- if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
- if stype==TXOUT_SCRIPT_MULTISIG:
- if col==COLS.Recip: return QVariant('[[Multiple]]')
- if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
- if stype==TXOUT_SCRIPT_UNKNOWN:
- if col==COLS.Recip: return QVariant('[[Non-Standard]]')
- if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
- if stype==TXOUT_SCRIPT_OP_EVAL:
- if col==COLS.Recip: return QVariant('[[OP-EVAL]]')
- if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
+ if col==COLS.Btc: return QVariant(coin2str(txout.getValue(),maxZeros=2))
+ if col==COLS.Recip:
+ if stype in CPP_TXOUT_HAS_ADDRSTR:
+ return QVariant(script_to_addrStr(txout.binScript))
+ elif stype==CPP_TXOUT_MULTISIG:
+ return QVariant('[[Multiple]]')
+ elif stype==CPP_TXOUT_NONSTANDARD:
+ return QVariant('[[Non-Standard]]')
elif role==Qt.TextAlignmentRole:
if col==COLS.Recip: return QVariant(int(Qt.AlignLeft | Qt.AlignVCenter))
if col==COLS.Btc: return QVariant(int(Qt.AlignRight | Qt.AlignVCenter))
@@ -806,7 +806,12 @@ def __init__(self, wltID, main):
# the python code... :(
for abe in TheBDM.getAddressBook(self.wlt.cppWallet):
- addr160 = CheckHash160(abe.getScrAddr())
+ scrAddr = abe.getScrAddr()
+ try:
+ addr160 = addrStr_to_hash160(scrAddr_to_addrStr(scrAddr))[1]
+ except Exception as e:
+ LOGERROR(str(e))
+ addr160 = ''
# Only grab addresses that are not in any of your Armory wallets
if not self.main.getWalletForAddr160(addr160):
@@ -815,9 +820,8 @@ def __init__(self, wltID, main):
txhashlist = []
for i in range(ntx):
txhashlist.append( abeList[i].getTxHash() )
- self.addrBook.append( [ addr160, txhashlist] )
+ self.addrBook.append( [scrAddr, txhashlist] )
- print 'Done collecting addresses for addrbook'
def rowCount(self, index=QModelIndex()):
return len(self.addrBook)
@@ -828,8 +832,13 @@ def columnCount(self, index=QModelIndex()):
def data(self, index, role=Qt.DisplayRole):
COL = ADDRBOOKCOLS
row,col = index.row(), index.column()
- addr160 = self.addrBook[row][0]
- addrB58 = hash160_to_addrStr(addr160)
+ scrAddr = self.addrBook[row][0]
+ if scrAddr[0] in [SCRADDR_P2PKH_BYTE, SCRADDR_P2SH_BYTE]:
+ addrB58 = scrAddr_to_addrStr(scrAddr)
+ addr160 = scrAddr[1:]
+ else:
+ addrB58 = ''
+ addr160 = ''
wltID = self.main.getWalletForAddr160(addr160)
txList = self.addrBook[row][1]
numSent = len(txList)
diff --git a/build_installer.bat b/build_installer.bat
index d983c0089..e00b7b91e 100644
--- a/build_installer.bat
+++ b/build_installer.bat
@@ -1,9 +1,9 @@
REM This should only be run from cppForSwig\BitcoinArmory_SwigDLL directory
-copy ..\libs\Win32\BitcoinArmory_SwigDLL.dll ..\..\_CppBlockUtils.pyd
+copy ..\libs\Win32\BitcoinArmory_SwigDLL.dll ..\..\_CppBlockUtils.pyd
C:\Python27\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py ..\..\imgList.xml
python ..\..\setup.py py2exe --includes sip,hashlib,json,twisted -d ..\..\ArmoryStandalone
copy ..\..\img\*.ico ..\..\ArmoryStandalone
copy ..\..\img\armory_logo*.png ..\..\ArmoryStandalone
-rtc /F:..\..\edit_icons.rts
+copy ..\..\default_bootstrap.torrent ..\..\ArmoryStandalone
python ..\..\writeNSISCompilerArgs.py
makensis.exe ..\..\ArmorySetup.nsi
diff --git a/cppForSwig/BinaryData.cpp b/cppForSwig/BinaryData.cpp
index 00feb2c26..d62ec04e5 100644
--- a/cppForSwig/BinaryData.cpp
+++ b/cppForSwig/BinaryData.cpp
@@ -1,6 +1,6 @@
////////////////////////////////////////////////////////////////////////////////
// //
-// Copyright(C) 2011-2013, Armory Technologies, Inc. //
+// Copyright (C) 2011-2014, Armory Technologies, Inc. //
// Distributed under the GNU Affero General Public License (AGPL v3) //
// See LICENSE or http://www.gnu.org/licenses/agpl.html //
// //
@@ -203,7 +203,7 @@ uint64_t BinaryReader::get_var_int(uint8_t* nRead)
uint64_t BinaryRefReader::get_var_int(uint8_t* nRead)
{
uint32_t nBytes;
- uint64_t varInt = BtcUtils::readVarInt( bdRef_.getPtr() + pos_, &nBytes);
+ uint64_t varInt = BtcUtils::readVarInt( bdRef_.getPtr() + pos_, getSizeRemaining(), &nBytes);
if(nRead != NULL)
*nRead = nBytes;
pos_ += nBytes;
diff --git a/cppForSwig/BinaryData.h b/cppForSwig/BinaryData.h
index 2d3d45bf8..73f92e970 100644
--- a/cppForSwig/BinaryData.h
+++ b/cppForSwig/BinaryData.h
@@ -1,6 +1,6 @@
////////////////////////////////////////////////////////////////////////////////
// //
-// Copyright(C) 2011-2013, Armory Technologies, Inc. //
+// Copyright (C) 2011-2014, Armory Technologies, Inc. //
// Distributed under the GNU Affero General Public License (AGPL v3) //
// See LICENSE or http://www.gnu.org/licenses/agpl.html //
// //
@@ -10,7 +10,9 @@
#include
#if defined(_MSC_VER) || defined(__MINGW32__)
-
+ #if _MSC_PLATFORM_TOOLSET!=110
+ #include
+ #endif
#else
#include
#include
@@ -528,7 +530,7 @@ class BinaryData
// Absorb a binary file's data into a new BinaryData object
int32_t readBinaryFile(string filename)
{
- ifstream is(filename.c_str(), ios::in | ios::binary );
+ ifstream is(OS_TranslatePath(filename.c_str()), ios::in | ios::binary );
if( !is.is_open() )
return -1;
@@ -1546,7 +1548,7 @@ class BinaryStreamBuffer
streamPtr_ = new ifstream;
weOwnTheStream_ = true;
ifstream* ifstreamPtr = static_cast(streamPtr_);
- ifstreamPtr->open(filename.c_str(), ios::in | ios::binary);
+ ifstreamPtr->open(OS_TranslatePath(filename.c_str()), ios::in | ios::binary);
if( !ifstreamPtr->is_open() )
{
cerr << "Could not open file for reading! File: " << filename.c_str() << endl;
diff --git a/cppForSwig/BitcoinArmory.sln b/cppForSwig/BitcoinArmory.sln
index ef344dbde..ed287e0b9 100644
--- a/cppForSwig/BitcoinArmory.sln
+++ b/cppForSwig/BitcoinArmory.sln
@@ -5,20 +5,9 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "guardian", "guardian\guardi
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cryptopp", "cryptopp\cryptopp.vcxproj", "{B1055DA3-83CE-47BF-98E6-2850E3411D39}"
EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "snappy", "leveldbwin\build\msvc10\snappy\snappy.vcxproj", "{72639F93-D2E6-4220-AA46-E24C502E470C}"
-EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "BitcoinArmory_SwigDLL", "BitcoinArmory_SwigDLL\BitcoinArmory_SwigDLL.vcxproj", "{19329A6B-FE96-4917-B69A-2B0375C12017}"
- ProjectSection(ProjectDependencies) = postProject
- {D35F732D-55D7-4037-9C6D-E141F740F802} = {D35F732D-55D7-4037-9C6D-E141F740F802}
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8} = {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}
- {B1055DA3-83CE-47BF-98E6-2850E3411D39} = {B1055DA3-83CE-47BF-98E6-2850E3411D39}
- EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "BitcoinArmory_CppTests", "BitcoinArmory_CppTests\BitcoinArmory_CppTests.vcxproj", "{D9733F9E-BE30-466F-B100-B686DF663C4D}"
- ProjectSection(ProjectDependencies) = postProject
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8} = {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}
- {B1055DA3-83CE-47BF-98E6-2850E3411D39} = {B1055DA3-83CE-47BF-98E6-2850E3411D39}
- EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "leveldb_msvc11_port", "leveldb_windows_port\leveldb_msvc11_port.vcxproj", "{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}"
EndProject
@@ -26,107 +15,66 @@ Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
- DebugDll|Win32 = DebugDll|Win32
- DebugDll|x64 = DebugDll|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
- ReleaseDll|Win32 = ReleaseDll|Win32
- ReleaseDll|x64 = ReleaseDll|x64
+ WinXP_32|Win32 = WinXP_32|Win32
+ WinXP_32|x64 = WinXP_32|x64
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{D35F732D-55D7-4037-9C6D-E141F740F802}.Debug|Win32.ActiveCfg = Release|Win32
{D35F732D-55D7-4037-9C6D-E141F740F802}.Debug|Win32.Build.0 = Release|Win32
{D35F732D-55D7-4037-9C6D-E141F740F802}.Debug|x64.ActiveCfg = Release|x64
{D35F732D-55D7-4037-9C6D-E141F740F802}.Debug|x64.Build.0 = Release|x64
- {D35F732D-55D7-4037-9C6D-E141F740F802}.DebugDll|Win32.ActiveCfg = Release|Win32
- {D35F732D-55D7-4037-9C6D-E141F740F802}.DebugDll|Win32.Build.0 = Release|Win32
- {D35F732D-55D7-4037-9C6D-E141F740F802}.DebugDll|x64.ActiveCfg = Release|x64
- {D35F732D-55D7-4037-9C6D-E141F740F802}.DebugDll|x64.Build.0 = Release|x64
{D35F732D-55D7-4037-9C6D-E141F740F802}.Release|Win32.ActiveCfg = Release|Win32
- {D35F732D-55D7-4037-9C6D-E141F740F802}.Release|Win32.Build.0 = Release|Win32
{D35F732D-55D7-4037-9C6D-E141F740F802}.Release|x64.ActiveCfg = Release|x64
{D35F732D-55D7-4037-9C6D-E141F740F802}.Release|x64.Build.0 = Release|x64
- {D35F732D-55D7-4037-9C6D-E141F740F802}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {D35F732D-55D7-4037-9C6D-E141F740F802}.ReleaseDll|Win32.Build.0 = Release|Win32
- {D35F732D-55D7-4037-9C6D-E141F740F802}.ReleaseDll|x64.ActiveCfg = Release|x64
- {D35F732D-55D7-4037-9C6D-E141F740F802}.ReleaseDll|x64.Build.0 = Release|x64
+ {D35F732D-55D7-4037-9C6D-E141F740F802}.WinXP_32|Win32.ActiveCfg = WinXP_32|Win32
+ {D35F732D-55D7-4037-9C6D-E141F740F802}.WinXP_32|Win32.Build.0 = WinXP_32|Win32
+ {D35F732D-55D7-4037-9C6D-E141F740F802}.WinXP_32|x64.ActiveCfg = Release|x64
+ {D35F732D-55D7-4037-9C6D-E141F740F802}.WinXP_32|x64.Build.0 = Release|x64
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Debug|Win32.ActiveCfg = Debug|Win32
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Debug|Win32.Build.0 = Debug|Win32
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Debug|x64.ActiveCfg = Debug|x64
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Debug|x64.Build.0 = Debug|x64
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.DebugDll|Win32.ActiveCfg = Debug|Win32
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.DebugDll|Win32.Build.0 = Debug|Win32
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.DebugDll|x64.ActiveCfg = Debug|x64
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.DebugDll|x64.Build.0 = Debug|x64
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Release|Win32.ActiveCfg = Release|Win32
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.Release|Win32.Build.0 = Release|Win32
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Release|x64.ActiveCfg = Release|x64
{B1055DA3-83CE-47BF-98E6-2850E3411D39}.Release|x64.Build.0 = Release|x64
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.ReleaseDll|Win32.Build.0 = Release|Win32
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.ReleaseDll|x64.ActiveCfg = Release|x64
- {B1055DA3-83CE-47BF-98E6-2850E3411D39}.ReleaseDll|x64.Build.0 = Release|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Debug|Win32.ActiveCfg = Debug|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Debug|x64.ActiveCfg = Debug|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Debug|x64.Build.0 = Debug|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.DebugDll|Win32.ActiveCfg = Debug|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.DebugDll|Win32.Build.0 = Debug|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.DebugDll|x64.ActiveCfg = Debug|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.DebugDll|x64.Build.0 = Debug|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Release|Win32.ActiveCfg = Release|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Release|Win32.Build.0 = Release|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.Release|x64.ActiveCfg = Release|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.ReleaseDll|Win32.Build.0 = Release|Win32
- {72639F93-D2E6-4220-AA46-E24C502E470C}.ReleaseDll|x64.ActiveCfg = Release|x64
- {72639F93-D2E6-4220-AA46-E24C502E470C}.ReleaseDll|x64.Build.0 = Release|x64
+ {B1055DA3-83CE-47BF-98E6-2850E3411D39}.WinXP_32|Win32.ActiveCfg = WinXP_32|Win32
+ {B1055DA3-83CE-47BF-98E6-2850E3411D39}.WinXP_32|Win32.Build.0 = WinXP_32|Win32
+ {B1055DA3-83CE-47BF-98E6-2850E3411D39}.WinXP_32|x64.ActiveCfg = Release|x64
+ {B1055DA3-83CE-47BF-98E6-2850E3411D39}.WinXP_32|x64.Build.0 = Release|x64
{19329A6B-FE96-4917-B69A-2B0375C12017}.Debug|Win32.ActiveCfg = Debug|Win32
{19329A6B-FE96-4917-B69A-2B0375C12017}.Debug|Win32.Build.0 = Debug|Win32
{19329A6B-FE96-4917-B69A-2B0375C12017}.Debug|x64.ActiveCfg = Debug|x64
{19329A6B-FE96-4917-B69A-2B0375C12017}.Debug|x64.Build.0 = Debug|x64
- {19329A6B-FE96-4917-B69A-2B0375C12017}.DebugDll|Win32.ActiveCfg = Debug|Win32
- {19329A6B-FE96-4917-B69A-2B0375C12017}.DebugDll|Win32.Build.0 = Debug|Win32
- {19329A6B-FE96-4917-B69A-2B0375C12017}.DebugDll|x64.ActiveCfg = Debug|x64
- {19329A6B-FE96-4917-B69A-2B0375C12017}.DebugDll|x64.Build.0 = Debug|x64
{19329A6B-FE96-4917-B69A-2B0375C12017}.Release|Win32.ActiveCfg = Release|Win32
{19329A6B-FE96-4917-B69A-2B0375C12017}.Release|Win32.Build.0 = Release|Win32
{19329A6B-FE96-4917-B69A-2B0375C12017}.Release|x64.ActiveCfg = Release|x64
{19329A6B-FE96-4917-B69A-2B0375C12017}.Release|x64.Build.0 = Release|x64
- {19329A6B-FE96-4917-B69A-2B0375C12017}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {19329A6B-FE96-4917-B69A-2B0375C12017}.ReleaseDll|Win32.Build.0 = Release|Win32
- {19329A6B-FE96-4917-B69A-2B0375C12017}.ReleaseDll|x64.ActiveCfg = Release|x64
- {19329A6B-FE96-4917-B69A-2B0375C12017}.ReleaseDll|x64.Build.0 = Release|x64
+ {19329A6B-FE96-4917-B69A-2B0375C12017}.WinXP_32|Win32.ActiveCfg = WinXP_32|Win32
+ {19329A6B-FE96-4917-B69A-2B0375C12017}.WinXP_32|Win32.Build.0 = WinXP_32|Win32
+ {19329A6B-FE96-4917-B69A-2B0375C12017}.WinXP_32|x64.ActiveCfg = Release|x64
+ {19329A6B-FE96-4917-B69A-2B0375C12017}.WinXP_32|x64.Build.0 = Release|x64
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Debug|Win32.ActiveCfg = Debug|Win32
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Debug|Win32.Build.0 = Debug|Win32
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Debug|x64.ActiveCfg = Debug|x64
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Debug|x64.Build.0 = Debug|x64
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.DebugDll|Win32.ActiveCfg = Debug|Win32
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.DebugDll|Win32.Build.0 = Debug|Win32
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.DebugDll|x64.ActiveCfg = Debug|x64
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.DebugDll|x64.Build.0 = Debug|x64
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Release|Win32.ActiveCfg = Release|Win32
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.Release|Win32.Build.0 = Release|Win32
{D9733F9E-BE30-466F-B100-B686DF663C4D}.Release|x64.ActiveCfg = Release|x64
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.ReleaseDll|Win32.Build.0 = Release|Win32
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.ReleaseDll|x64.ActiveCfg = Release|x64
- {D9733F9E-BE30-466F-B100-B686DF663C4D}.ReleaseDll|x64.Build.0 = Release|x64
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|Win32.ActiveCfg = Release|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|Win32.Build.0 = Release|Win32
+ {D9733F9E-BE30-466F-B100-B686DF663C4D}.WinXP_32|Win32.ActiveCfg = Release|Win32
+ {D9733F9E-BE30-466F-B100-B686DF663C4D}.WinXP_32|Win32.Build.0 = Release|Win32
+ {D9733F9E-BE30-466F-B100-B686DF663C4D}.WinXP_32|x64.ActiveCfg = Release|x64
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|Win32.ActiveCfg = Debug|Win32
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|Win32.Build.0 = Debug|Win32
{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|x64.ActiveCfg = Debug|x64
{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Debug|x64.Build.0 = Debug|x64
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.DebugDll|Win32.ActiveCfg = Debug|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.DebugDll|Win32.Build.0 = Debug|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.DebugDll|x64.ActiveCfg = Debug|x64
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.DebugDll|x64.Build.0 = Debug|x64
{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Release|Win32.ActiveCfg = Release|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Release|Win32.Build.0 = Release|Win32
{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Release|x64.ActiveCfg = Release|x64
{01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.Release|x64.Build.0 = Release|x64
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.ReleaseDll|Win32.ActiveCfg = Release|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.ReleaseDll|Win32.Build.0 = Release|Win32
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.ReleaseDll|x64.ActiveCfg = Release|x64
- {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.ReleaseDll|x64.Build.0 = Release|x64
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.WinXP_32|Win32.ActiveCfg = WinXP_32|Win32
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.WinXP_32|Win32.Build.0 = WinXP_32|Win32
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.WinXP_32|x64.ActiveCfg = Release|x64
+ {01CD1176-66F7-44AB-9E7B-8AEECC9915E8}.WinXP_32|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj b/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj
index 1b53ba3be..e0e845916 100644
--- a/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj
+++ b/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj
@@ -96,7 +96,7 @@
Consoletrue
- cryptopp_d.lib;leveldb_msvc11_port_d.lib;snappy_d.lib;%(AdditionalDependencies)
+ cryptopp_d.lib;leveldb_msvc11_port_d.lib;%(AdditionalDependencies)$(SolutionDir)libs\$(Platform)\;%(AdditionalLibraryDirectories)
@@ -167,6 +167,7 @@
+
@@ -180,6 +181,7 @@
+
diff --git a/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj.filters b/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj.filters
index 20e1904c8..13101ca1b 100644
--- a/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj.filters
+++ b/cppForSwig/BitcoinArmory_CppTests/BitcoinArmory_CppTests.vcxproj.filters
@@ -45,6 +45,9 @@
Header Files
+
+ Header Files
+
@@ -77,5 +80,8 @@
Source Files
+
+ Source Files
+
\ No newline at end of file
diff --git a/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj b/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj
index 60beb61ad..7d9a8cfb9 100644
--- a/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj
+++ b/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj
@@ -17,14 +17,6 @@
Releasex64
-
- WinXP Release
- Win32
-
-
- WinXP Release
- x64
- WinXP_32Win32
@@ -33,22 +25,6 @@
WinXP_32x64
-
- WinXP_release_x86
- Win32
-
-
- WinXP_release_x86
- x64
-
-
- Win_XP_32
- Win32
-
-
- Win_XP_32
- x64
- {19329A6B-FE96-4917-B69A-2B0375C12017}
@@ -176,7 +152,7 @@
$(SolutionDir)libs\x64
- false
+ true$(SolutionDir)libs\$(Platform)\$(SolutionDir)libs\IntermediateBuildFiles\$(Configuration).$(ProjectName)\
@@ -217,6 +193,9 @@
C:\Python27\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py ..\..\imgList.xml
+
+ ..\swigwin\swig.exe -c++ -python -threads -classic -outdir ..\..\ -v ..\CppBlockUtils.i
+
@@ -263,6 +242,9 @@ C:\Python27\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py ..\
C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py ..\..\imgList.xml
+
+ ..\swigwin\swig.exe -c++ -python -threads -classic -outdir ..\..\ -v ..\CppBlockUtils.i
+
@@ -289,23 +271,23 @@ C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py
Level3
-
-
+ NotUsingFulltrue
- false
- WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;_MSC_PLATFORM_TOOLSET=$(PlatformToolset);%(PreprocessorDefinitions)
- ..\leveldb;..\leveldb\include;..\cryptopp;C:\Python27\include;C:\Python26\include;%(AdditionalIncludeDirectories)
+ true
+ _CRT_SECURE_NO_WARNINGS;_MSC_PLATFORM_TOOLSET=$(PlatformToolset);%(PreprocessorDefinitions)
+ ..\leveldb;..\leveldb\include;..\cryptopp;C:\Python27\include;C:\Python26\include;..\leveldb_windows_port\win32_posix;%(AdditionalIncludeDirectories)MultiThreadedSpeedtrueNone
+ CompileAsCppConsolefalse
- true
- false
+ false
+ true$(SolutionDir)libs\$(Platform)\;C:\Python27\libs;C:\Python26\libs;%(AdditionalLibraryDirectories)cryptopp.lib;leveldb_msvc11_port.lib;%(AdditionalDependencies)
@@ -315,7 +297,9 @@ C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py
..\swigwin\swig.exe -c++ -python -threads -classic -outdir ..\..\ -v ..\CppBlockUtils.i
- ..\..\build_installer.bat
+ build_installer.bat
+copy ..\libs\Win32\BitcoinArmory_SwigDLL.dll ..\..\_CppBlockUtils.pyd
+false
@@ -343,8 +327,8 @@ C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py
falsetruetrue
- ..\pthreads-w32-2-9-1-release\Pre-built.2\lib\x86;$(SolutionDir)libs\$(Platform)\Win_XP;C:\Python27\libs;%(AdditionalLibraryDirectories)
- cryptopp.lib;leveldb_msvc11_port.lib;pthreadVC2.lib;%(AdditionalDependencies)
+ $(SolutionDir)libs\$(Platform)\Win_XP;C:\Python27\libs;%(AdditionalLibraryDirectories)
+ cryptopp.lib;leveldb_msvc11_port.lib;%(AdditionalDependencies)
@@ -372,8 +356,8 @@ C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py
falsetruetrue
- C:\Python27_64\libs;..\libs\x64;..\pthreads-w32-2-9-1-release\Pre-built.2\lib\x64;%(AdditionalLibraryDirectories)
- cryptopp.lib;leveldb_msvc11_port.lib;snappy.lib;pthreadVC2.lib;python27.lib;%(AdditionalDependencies)
+ C:\Python27_64\libs;..\libs\x64;%(AdditionalLibraryDirectories)
+ cryptopp.lib;leveldb_msvc11_port.lib;%(AdditionalDependencies)
@@ -447,6 +431,7 @@ C:\Python27_64\Lib\site-packages\PyQt4\pyrcc4.exe -o ..\..\qrc_img_resources.py
+
diff --git a/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj.filters b/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj.filters
index d30e06ed3..f8cc2c8a2 100644
--- a/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj.filters
+++ b/cppForSwig/BitcoinArmory_SwigDLL/BitcoinArmory_SwigDLL.vcxproj.filters
@@ -28,6 +28,9 @@
Source Files
+
+ Source Files
+
diff --git a/cppForSwig/BlockObj.cpp b/cppForSwig/BlockObj.cpp
index 2bbb3c793..823495405 100644
--- a/cppForSwig/BlockObj.cpp
+++ b/cppForSwig/BlockObj.cpp
@@ -1,6 +1,6 @@
////////////////////////////////////////////////////////////////////////////////
// //
-// Copyright(C) 2011-2013, Armory Technologies, Inc. //
+// Copyright (C) 2011-2014, Armory Technologies, Inc. //
// Distributed under the GNU Affero General Public License (AGPL v3) //
// See LICENSE or http://www.gnu.org/licenses/agpl.html //
// //
@@ -21,8 +21,10 @@
////////////////////////////////////////////////////////////////////////////////
-void BlockHeader::unserialize(uint8_t const * ptr)
+void BlockHeader::unserialize(uint8_t const * ptr, uint32_t size)
{
+ if (size < HEADER_SIZE)
+ throw BlockDeserializingException();
dataCopy_.copyFrom(ptr, HEADER_SIZE);
BtcUtils::getHash256(dataCopy_.getPtr(), HEADER_SIZE, thisHash_);
difficultyDbl_ = BtcUtils::convertDiffBitsToDouble(
@@ -40,7 +42,7 @@ void BlockHeader::unserialize(uint8_t const * ptr)
////////////////////////////////////////////////////////////////////////////////
void BlockHeader::unserialize(BinaryDataRef const & str)
{
- unserialize(str.getPtr());
+ unserialize(str.getPtr(), str.getSize());
}
////////////////////////////////////////////////////////////////////////////////
@@ -155,18 +157,25 @@ BinaryData OutPoint::serialize(void) const
return bw.getData();
}
-void OutPoint::unserialize(uint8_t const * ptr)
+void OutPoint::unserialize(uint8_t const * ptr, uint32_t size)
{
+ if (size < 32)
+ throw BlockDeserializingException();
+
txHash_.copyFrom(ptr, 32);
txOutIndex_ = READ_UINT32_LE(ptr+32);
}
void OutPoint::unserialize(BinaryReader & br)
{
+ if (br.getSizeRemaining() < 32)
+ throw BlockDeserializingException();
br.get_BinaryData(txHash_, 32);
txOutIndex_ = br.get_uint32_t();
}
void OutPoint::unserialize(BinaryRefReader & brr)
{
+ if (brr.getSizeRemaining() < 32)
+ throw BlockDeserializingException();
brr.get_BinaryData(txHash_, 32);
txOutIndex_ = brr.get_uint32_t();
}
@@ -174,11 +183,11 @@ void OutPoint::unserialize(BinaryRefReader & brr)
void OutPoint::unserialize(BinaryData const & bd)
{
- unserialize(bd.getPtr());
+ unserialize(bd.getPtr(), bd.getSize());
}
void OutPoint::unserialize(BinaryDataRef const & bdRef)
{
- unserialize(bdRef.getPtr());
+ unserialize(bdRef.getPtr(), bdRef.getSize());
}
@@ -194,7 +203,7 @@ void OutPoint::unserialize(BinaryDataRef const & bdRef)
OutPoint TxIn::getOutPoint(void) const
{
OutPoint op;
- op.unserialize(getPtr());
+ op.unserialize(getPtr(), getSize());
return op;
}
@@ -215,20 +224,26 @@ BinaryDataRef TxIn::getScriptRef(void) const
}
-
/////////////////////////////////////////////////////////////////////////////
-void TxIn::unserialize(uint8_t const * ptr,
+void TxIn::unserialize_checked(uint8_t const * ptr,
+ uint32_t size,
uint32_t nbytes,
TxRef parent,
uint32_t idx)
{
parentTx_ = parent;
index_ = idx;
- uint32_t numBytes = (nbytes==0 ? BtcUtils::TxInCalcLength(ptr) : nbytes);
+ uint32_t numBytes = (nbytes==0 ? BtcUtils::TxInCalcLength(ptr, size) : nbytes);
+ if (size < numBytes)
+ throw BlockDeserializingException();
dataCopy_.copyFrom(ptr, numBytes);
+ if (dataCopy_.getSize()-36 < 1)
+ throw BlockDeserializingException();
scriptOffset_ = 36 + BtcUtils::readVarIntLength(getPtr()+36);
+ if (dataCopy_.getSize() < 32)
+ throw BlockDeserializingException();
scriptType_ = BtcUtils::getTxInScriptType(getScriptRef(),
BinaryDataRef(getPtr(),32));
@@ -245,7 +260,7 @@ void TxIn::unserialize(BinaryRefReader & brr,
TxRef parent,
uint32_t idx)
{
- unserialize(brr.getCurrPtr(), nbytes, parent, idx);
+ unserialize_checked(brr.getCurrPtr(), brr.getSizeRemaining(), nbytes, parent, idx);
brr.advance(getSize());
}
@@ -255,7 +270,7 @@ void TxIn::unserialize(BinaryData const & str,
TxRef parent,
uint32_t idx)
{
- unserialize(str.getPtr(), nbytes, parent, idx);
+ unserialize_checked(str.getPtr(), str.getSize(), nbytes, parent, idx);
}
/////////////////////////////////////////////////////////////////////////////
@@ -264,7 +279,7 @@ void TxIn::unserialize(BinaryDataRef str,
TxRef parent,
uint32_t idx)
{
- unserialize(str.getPtr(), nbytes, parent, idx);
+ unserialize_checked(str.getPtr(), str.getSize(), nbytes, parent, idx);
}
@@ -281,7 +296,14 @@ bool TxIn::getSenderScrAddrIfAvail(BinaryData & addrTarget) const
return false;
}
- addrTarget = BtcUtils::getTxInAddrFromType(getScript(), scriptType_);
+ try
+ {
+ addrTarget = BtcUtils::getTxInAddrFromType(getScript(), scriptType_);
+ }
+ catch (BlockDeserializingException&)
+ {
+ return false;
+ }
return true;
}
@@ -355,9 +377,9 @@ BinaryDataRef TxOut::getScriptRef(void)
return BinaryDataRef( dataCopy_.getPtr()+scriptOffset_, getScriptSize() );
}
-
/////////////////////////////////////////////////////////////////////////////
-void TxOut::unserialize( uint8_t const * ptr,
+void TxOut::unserialize_checked( uint8_t const * ptr,
+ uint32_t size,
uint32_t nbytes,
TxRef parent,
uint32_t idx)
@@ -365,9 +387,13 @@ void TxOut::unserialize( uint8_t const * ptr,
parentTx_ = parent;
index_ = idx;
uint32_t numBytes = (nbytes==0 ? BtcUtils::TxOutCalcLength(ptr) : nbytes);
+ if (size < numBytes)
+ throw BlockDeserializingException();
dataCopy_.copyFrom(ptr, numBytes);
scriptOffset_ = 8 + BtcUtils::readVarIntLength(getPtr()+8);
+ if (dataCopy_.getSize()-scriptOffset_-getScriptSize() > size)
+ throw BlockDeserializingException();
BinaryDataRef scriptRef(dataCopy_.getPtr()+scriptOffset_, getScriptSize());
scriptType_ = BtcUtils::getTxOutScriptType(scriptRef);
uniqueScrAddr_ = BtcUtils::getTxOutScrAddr(scriptRef);
@@ -385,7 +411,7 @@ void TxOut::unserialize( BinaryData const & str,
TxRef parent,
uint32_t idx)
{
- unserialize(str.getPtr(), nbytes, parent, idx);
+ unserialize_checked(str.getPtr(), str.getSize(), nbytes, parent, idx);
}
/////////////////////////////////////////////////////////////////////////////
@@ -394,7 +420,7 @@ void TxOut::unserialize( BinaryDataRef const & str,
TxRef parent,
uint32_t idx)
{
- unserialize(str.getPtr(), nbytes, parent, idx);
+ unserialize_checked(str.getPtr(), str.getSize(), nbytes, parent, idx);
}
/////////////////////////////////////////////////////////////////////////////
@@ -403,7 +429,7 @@ void TxOut::unserialize( BinaryRefReader & brr,
TxRef parent,
uint32_t idx)
{
- unserialize( brr.getCurrPtr(), nbytes, parent, idx );
+ unserialize_checked( brr.getCurrPtr(), brr.getSizeRemaining(), nbytes, parent, idx );
brr.advance(getSize());
}
@@ -464,14 +490,20 @@ Tx::Tx(TxRef txref)
}
/////////////////////////////////////////////////////////////////////////////
-void Tx::unserialize(uint8_t const * ptr)
+void Tx::unserialize(uint8_t const * ptr, uint32_t size)
{
- uint32_t nBytes = BtcUtils::TxCalcLength(ptr, &offsetsTxIn_, &offsetsTxOut_);
+ uint32_t nBytes = BtcUtils::TxCalcLength(ptr, size, &offsetsTxIn_, &offsetsTxOut_);
+ if (nBytes > size)
+ throw BlockDeserializingException();
dataCopy_.copyFrom(ptr, nBytes);
BtcUtils::getHash256(ptr, nBytes, thisHash_);
+ if (8 > size)
+ throw BlockDeserializingException();
uint32_t numTxOut = offsetsTxOut_.size()-1;
version_ = READ_UINT32_LE(ptr);
+ if (4 > size - offsetsTxOut_[numTxOut])
+ throw BlockDeserializingException();
lockTime_ = READ_UINT32_LE(ptr + offsetsTxOut_[numTxOut]);
isInitialized_ = true;
@@ -497,7 +529,7 @@ BinaryData Tx::getThisHash(void) const
/////////////////////////////////////////////////////////////////////////////
void Tx::unserialize(BinaryRefReader & brr)
{
- unserialize(brr.getCurrPtr());
+ unserialize(brr.getCurrPtr(), brr.getSizeRemaining());
brr.advance(getSize());
}
@@ -529,7 +561,8 @@ TxIn Tx::getTxInCopy(int i)
{
assert(isInitialized());
uint32_t txinSize = offsetsTxIn_[i+1] - offsetsTxIn_[i];
- TxIn out(dataCopy_.getPtr()+offsetsTxIn_[i], txinSize, txRefObj_, i);
+ TxIn out;
+ out.unserialize_checked(dataCopy_.getPtr()+offsetsTxIn_[i], dataCopy_.getSize()-offsetsTxIn_[i], txinSize, txRefObj_, i);
if(txRefObj_.isInitialized())
{
@@ -547,13 +580,13 @@ TxOut Tx::getTxOutCopy(int i)
{
assert(isInitialized());
uint32_t txoutSize = offsetsTxOut_[i+1] - offsetsTxOut_[i];
- TxOut out(dataCopy_.getPtr()+offsetsTxOut_[i], txoutSize, txRefObj_, i);
-
+ TxOut out;
+ out.unserialize_checked(dataCopy_.getPtr()+offsetsTxOut_[i], dataCopy_.getSize()-offsetsTxOut_[i], txoutSize, txRefObj_, i);
+ out.setParentHash(getThisHash());
+
if(txRefObj_.isInitialized())
- {
- out.setParentHash(getThisHash());
out.setParentHeight(txRefObj_.getBlockHeight());
- }
+
return out;
}
@@ -999,7 +1032,7 @@ bool TxIOPair::isUnspent(void)
}
//////////////////////////////////////////////////////////////////////////////
-bool TxIOPair::isSpendable(uint32_t currBlk)
+bool TxIOPair::isSpendable(uint32_t currBlk, bool ignoreAllZeroConf)
{
// Spendable TxOuts are ones with at least 1 confirmation, or zero-conf
// TxOuts that were sent-to-self. Obviously, they should be unspent, too
@@ -1016,13 +1049,13 @@ bool TxIOPair::isSpendable(uint32_t currBlk)
}
if( hasTxOutZC() && isTxOutFromSelf() )
- return true;
+ return !ignoreAllZeroConf;
return false;
}
//////////////////////////////////////////////////////////////////////////////
-bool TxIOPair::isMineButUnconfirmed(uint32_t currBlk)
+bool TxIOPair::isMineButUnconfirmed(uint32_t currBlk, bool inclAllZC)
{
// All TxOuts that were from our own transactions are always confirmed
if(isTxOutFromSelf())
@@ -1039,8 +1072,7 @@ bool TxIOPair::isMineButUnconfirmed(uint32_t currBlk)
else
return (nConfputValue(BLKDATA, sbh.getDBKey(), sbh.serializeDBValue(BLKDATA));
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+static StoredTx* makeSureSTXInMap(
+ InterfaceToLDB* iface,
+ BinaryDataRef txHash,
+ map & stxMap,
+ uint64_t* additionalSize)
+{
+ // TODO: If we are pruning, we may have completely removed this tx from
+ // the DB, which means that it won't be in the map or the DB.
+ // But this method was written before pruning was ever implemented...
+ StoredTx * stxptr;
+
+ // Get the existing STX or make a new one
+ map::iterator txIter = stxMap.find(txHash);
+ if(ITER_IN_MAP(txIter, stxMap))
+ stxptr = &(txIter->second);
+ else
+ {
+ StoredTx stxTemp;
+ iface->getStoredTx(stxTemp, txHash);
+ stxMap[txHash] = stxTemp;
+ stxptr = &stxMap[txHash];
+ if (additionalSize)
+ *additionalSize += stxptr->numBytes_;
+ }
+
+ return stxptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// This avoids having to do the double-lookup when fetching by hash.
+// We still pass in the hash anyway, because the map is indexed by the hash,
+// and we'd like to not have to do a lookup for the hash if only provided
+// {hgt, dup, idx}
+static StoredTx* makeSureSTXInMap(
+ InterfaceToLDB* iface,
+ uint32_t hgt,
+ uint8_t dup,
+ uint16_t txIdx,
+ BinaryDataRef txHash,
+ map & stxMap,
+ uint64_t* additionalSize)
+{
+ StoredTx * stxptr;
+
+ // Get the existing STX or make a new one
+ map::iterator txIter = stxMap.find(txHash);
+ if(ITER_IN_MAP(txIter, stxMap))
+ stxptr = &(txIter->second);
+ else
+ {
+ StoredTx &stxTemp = stxMap[txHash];
+ iface->getStoredTx(stxTemp, hgt, dup, txIdx);
+ stxptr = &stxMap[txHash];
+ if (additionalSize)
+ *additionalSize += stxptr->numBytes_;
+ }
+
+ return stxptr;
+}
+
+static StoredScriptHistory* makeSureSSHInMap(
+ InterfaceToLDB* iface,
+ BinaryDataRef uniqKey,
+ BinaryDataRef hgtX,
+ map & sshMap,
+ uint64_t* additionalSize,
+ bool createIfDNE=true)
+{
+ SCOPED_TIMER("makeSureSSHInMap");
+ StoredScriptHistory * sshptr;
+
+ // If already in Map
+ map::iterator iter = sshMap.find(uniqKey);
+ if(ITER_IN_MAP(iter, sshMap))
+ {
+ SCOPED_TIMER("___SSH_AlreadyInMap");
+ sshptr = &(iter->second);
+ }
+ else
+ {
+ StoredScriptHistory sshTemp;
+
+ iface->getStoredScriptHistorySummary(sshTemp, uniqKey);
+ // sshTemp.alreadyScannedUpToBlk_ = getAppliedToHeightInDB(); TODO
+ if (additionalSize)
+ *additionalSize += UPDATE_BYTES_SSH;
+ if(sshTemp.isInitialized())
+ {
+ SCOPED_TIMER("___SSH_AlreadyInDB");
+ // We already have an SSH in DB -- pull it into the map
+ sshMap[uniqKey] = sshTemp;
+ sshptr = &sshMap[uniqKey];
+ }
+ else
+ {
+ SCOPED_TIMER("___SSH_NeedCreate");
+ if(!createIfDNE)
+ return NULL;
+
+ sshMap[uniqKey] = StoredScriptHistory();
+ sshptr = &sshMap[uniqKey];
+ sshptr->uniqueKey_ = uniqKey;
+ }
+ }
+
+
+ // If sub-history for this block doesn't exist, add an empty one before
+ // returning the pointer to the SSH. Since we haven't actually inserted
+ // anything into the SubSSH, we don't need to adjust the totalTxioCount_
+ uint32_t prevSize = sshptr->subHistMap_.size();
+ iface->fetchStoredSubHistory(*sshptr, hgtX, true, false);
+ uint32_t newSize = sshptr->subHistMap_.size();
+
+ if (additionalSize)
+ *additionalSize += (newSize - prevSize) * UPDATE_BYTES_SUBSSH;
+ return sshptr;
+}
+
+
BlockDataManager_LevelDB* BlockDataManager_LevelDB::theOnlyBDM_ = NULL;
@@ -112,34 +238,34 @@ ScrAddrObj::ScrAddrObj(HashString addr,
////////////////////////////////////////////////////////////////////////////////
-uint64_t ScrAddrObj::getSpendableBalance(uint32_t currBlk)
+uint64_t ScrAddrObj::getSpendableBalance(uint32_t currBlk, bool ignoreAllZC)
{
uint64_t balance = 0;
for(uint32_t i=0; iisSpendable(currBlk))
+ if(relevantTxIOPtrs_[i]->isSpendable(currBlk, ignoreAllZC))
balance += relevantTxIOPtrs_[i]->getValue();
}
for(uint32_t i=0; iisSpendable(currBlk))
+ if(relevantTxIOPtrsZC_[i]->isSpendable(currBlk, ignoreAllZC))
balance += relevantTxIOPtrsZC_[i]->getValue();
}
return balance;
}
////////////////////////////////////////////////////////////////////////////////
-uint64_t ScrAddrObj::getUnconfirmedBalance(uint32_t currBlk)
+uint64_t ScrAddrObj::getUnconfirmedBalance(uint32_t currBlk, bool inclAllZC)
{
uint64_t balance = 0;
for(uint32_t i=0; iisMineButUnconfirmed(currBlk))
+ if(relevantTxIOPtrs_[i]->isMineButUnconfirmed(currBlk, inclAllZC))
balance += relevantTxIOPtrs_[i]->getValue();
}
for(uint32_t i=0; iisMineButUnconfirmed(currBlk))
+ if(relevantTxIOPtrsZC_[i]->isMineButUnconfirmed(currBlk, inclAllZC))
balance += relevantTxIOPtrsZC_[i]->getValue();
}
return balance;
@@ -165,22 +291,24 @@ uint64_t ScrAddrObj::getFullBalance(void)
}
////////////////////////////////////////////////////////////////////////////////
-vector ScrAddrObj::getSpendableTxOutList(uint32_t blkNum)
+vector ScrAddrObj::getSpendableTxOutList(uint32_t blkNum,
+ bool ignoreAllZC)
{
vector utxoList(0);
for(uint32_t i=0; i BtcWallet::isMineBulkFilter(Tx & tx,
- bool withMultiSig)
+ bool withMultiSig) const
{
return isMineBulkFilter(tx, txioMap_, withMultiSig);
}
@@ -396,22 +524,24 @@ pair BtcWallet::isMineBulkFilter(Tx & tx,
/////////////////////////////////////////////////////////////////////////////
// Determine, as fast as possible, whether this tx is relevant to us
// Return
-pair BtcWallet::isMineBulkFilter(Tx & tx,
- map & txiomap,
- bool withMultiSig)
+pair BtcWallet::isMineBulkFilter(
+ Tx & tx,
+ map const & txiomap,
+ bool withMultiSig) const
{
// Since 99.999%+ of all transactions are not ours, let's do the
// fastest bulk filter possible, even though it will add
// redundant computation to the tx that are ours. In fact,
// we will skip the TxIn/TxOut convenience methods and follow the
- // pointers directly the data we want
+ // pointers directly to the data we want
uint8_t const * txStartPtr = tx.getPtr();
for(uint32_t iin=0; iin(true,true);
}
@@ -565,6 +695,16 @@ void BtcWallet::pprintAlot(uint32_t topBlk, bool withAddr)
}
}
+void BtcWallet::reorgChangeBlkNum(uint32_t newBlkHgt)
+{
+ if(newBlkHgtsize()-1;
@@ -674,7 +814,7 @@ void BlockDataManager_LevelDB::registeredScrAddrScan(
{
// We have the txin, now check if it contains one of our TxOuts
static OutPoint op;
- op.unserialize(txStartPtr + (*txInOffsets)[iin]);
+ op.unserialize(txStartPtr + (*txInOffsets)[iin], txSize - (*txInOffsets)[iin]);
if(registeredOutPoints_.count(op) > 0)
{
insertRegisteredTxIfNew(BtcUtils::getHash256(txptr, txSize));
@@ -769,7 +909,7 @@ void BlockDataManager_LevelDB::registeredScrAddrScan_IterSafe(
{
txInOffsets = &localOffsIn;
txOutOffsets = &localOffsOut;
- BtcUtils::TxCalcLength(txStartPtr, txInOffsets, txOutOffsets);
+ BtcUtils::TxCalcLength(txStartPtr, tx.getSize(), txInOffsets, txOutOffsets);
}
uint32_t nTxIn = txInOffsets->size()-1;
@@ -779,7 +919,7 @@ void BlockDataManager_LevelDB::registeredScrAddrScan_IterSafe(
{
// We have the txin, now check if it spends one of our TxOuts
static OutPoint op;
- op.unserialize(txStartPtr + (*txInOffsets)[iin]);
+ op.unserialize(txStartPtr + (*txInOffsets)[iin], tx.getSize()-(*txInOffsets)[iin]);
if(registeredOutPoints_.count(op) > 0)
{
insertRegisteredTxIfNew(tx.getTxRef(),
@@ -870,7 +1010,8 @@ void BlockDataManager_LevelDB::registeredScrAddrScan( Tx & theTx )
void BtcWallet::scanTx(Tx & tx,
uint32_t txIndex,
uint32_t txtime,
- uint32_t blknum)
+ uint32_t blknum,
+ bool mainwallet)
{
int64_t totalLedgerAmt = 0;
@@ -896,226 +1037,225 @@ void BtcWallet::scanTx(Tx & tx,
map::iterator addrIter;
ScrAddrObj* thisAddrPtr;
HashString scraddr;
- //for(uint32_t i=0; i no addr inputs
+ if(outpt.getTxHashRef() == BtcUtils::EmptyHash_)
+ {
+ isCoinbaseTx = true;
+ continue;
+ }
+
+ // We have the txin, now check if it contains one of our TxOuts
+ map::iterator txioIter = txioMap_.find(outpt);
+ //bool txioWasInMapAlready = (txioIter != txioMap_.end());
+ bool txioWasInMapAlready = ITER_IN_MAP(txioIter, txioMap_);
+ if(txioWasInMapAlready)
{
- TxIn txin = tx.getTxInCopy(iin);
- OutPoint outpt = txin.getOutPoint();
- // Empty hash in Outpoint means it's a COINBASE tx --> no addr inputs
- if(outpt.getTxHashRef() == BtcUtils::EmptyHash_)
+ // If we are here, we know that this input is spending an
+ // output owned by this wallet.
+ // We will get here for every address in the search, even
+ // though it is only relevant to one of the addresses.
+ TxIOPair & txio = txioIter->second;
+ TxOut txout = txio.getTxOutCopy();
+
+ // It's our TxIn, so address should be in this wallet
+ scraddr = txout.getScrAddressStr();
+ addrIter = scrAddrMap_.find(scraddr);
+ //if( addrIter == scrAddrMap_.end())
+ if(ITER_NOT_IN_MAP(addrIter, scrAddrMap_))
{
- isCoinbaseTx = true;
+ // Have TxIO but address is not in the map...?
+ LOGERR << "ERROR: TxIn in TxIO map, but addr not in wallet...?";
continue;
}
+ thisAddrPtr = &addrIter->second;
- // We have the txin, now check if it contains one of our TxOuts
- map::iterator txioIter = txioMap_.find(outpt);
- //bool txioWasInMapAlready = (txioIter != txioMap_.end());
- bool txioWasInMapAlready = ITER_IN_MAP(txioIter, txioMap_);
- if(txioWasInMapAlready)
+ // We need to make sure the ledger entry makes sense, and make
+ // sure we update TxIO objects appropriately
+ int64_t thisVal = (int64_t)txout.getValue();
+ totalLedgerAmt -= thisVal;
+
+ // Skip, if zero-conf-spend, but it's already got a zero-conf
+ if( isZeroConf && txio.hasTxInZC() )
+ return; // this tx can't be valid, might as well bail now
+
+ if( !txio.hasTxInInMain() && !(isZeroConf && txio.hasTxInZC()) )
{
- // If we are here, we know that this input is spending an
- // output owned by this wallet.
- // We will get here for every address in the search, even
- // though it is only relevant to one of the addresses.
- TxIOPair & txio = txioIter->second;
- TxOut txout = txio.getTxOutCopy();
-
- // It's our TxIn, so address should be in this wallet
- scraddr = txout.getScrAddressStr();
- addrIter = scrAddrMap_.find(scraddr);
- //if( addrIter == scrAddrMap_.end())
- if(ITER_NOT_IN_MAP(addrIter, scrAddrMap_))
- {
- // Have TxIO but address is not in the map...?
- LOGERR << "ERROR: TxIn in TxIO map, but addr not in wallet...?";
+ // isValidNew only identifies whether this set-call succeeded
+ // If it didn't, it's because this is from a zero-conf tx but this
+ // TxIn already exists in the blockchain spending the same output.
+ // (i.e. we have a ref to the prev output, but it's been spent!)
+ bool isValidNew;
+ if(isZeroConf)
+ isValidNew = txio.setTxInZC(&tx, iin);
+ else
+ isValidNew = txio.setTxIn(tx.getTxRef(), iin);
+
+ if(!isValidNew)
continue;
- }
- thisAddrPtr = &addrIter->second;
- // We need to make sure the ledger entry makes sense, and make
- // sure we update TxIO objects appropriately
- int64_t thisVal = (int64_t)txout.getValue();
- totalLedgerAmt -= thisVal;
+ anyNewTxInIsOurs = true;
- // Skip, if zero-conf-spend, but it's already got a zero-conf
- if( isZeroConf && txio.hasTxInZC() )
- return; // this tx can't be valid, might as well bail now
+ LedgerEntry newEntry(scraddr,
+ -(int64_t)thisVal,
+ blknum,
+ tx.getThisHash(),
+ iin,
+ txtime,
+ isCoinbaseTx,
+ false, // SentToSelf is meaningless for addr ledger
+ false); // "isChangeBack" is meaningless for TxIn
+ thisAddrPtr->addLedgerEntry(newEntry, isZeroConf);
- if( !txio.hasTxInInMain() && !(isZeroConf && txio.hasTxInZC()) )
- {
- // isValidNew only identifies whether this set-call succeeded
- // If it didn't, it's because this is from a zero-conf tx but this
- // TxIn already exists in the blockchain spending the same output.
- // (i.e. we have a ref to the prev output, but it's been spent!)
- bool isValidNew;
- if(isZeroConf)
- isValidNew = txio.setTxInZC(&tx, iin);
- else
- isValidNew = txio.setTxIn(tx.getTxRef(), iin);
-
- if(!isValidNew)
- continue;
+ txLedgerForComments_.push_back(newEntry);
+ savedAsTxIn = true;
- anyNewTxInIsOurs = true;
-
- LedgerEntry newEntry(scraddr,
- -(int64_t)thisVal,
- blknum,
- tx.getThisHash(),
- iin,
- txtime,
- isCoinbaseTx,
- false, // SentToSelf is meaningless for addr ledger
- false); // "isChangeBack" is meaningless for TxIn
- thisAddrPtr->addLedgerEntry(newEntry, isZeroConf);
-
- // Update last seen on the network
- thisAddrPtr->setLastTimestamp(txtime);
- thisAddrPtr->setLastBlockNum(blknum);
- }
+ // Update last seen on the network
+ thisAddrPtr->setLastTimestamp(txtime);
+ thisAddrPtr->setLastBlockNum(blknum);
}
- else
+ }
+ else
+ {
+ // Lots of txins that we won't have, this is a normal conditional
+ // But we should check the non-std txio list since it may actually
+ // be there
+ //if(nonStdTxioMap_.find(outpt) != nonStdTxioMap_.end())
+ if(KEY_IN_MAP(outpt, nonStdTxioMap_))
{
- // Lots of txins that we won't have, this is a normal conditional
- // But we should check the non-std txio list since it may actually
- // be there
- //if(nonStdTxioMap_.find(outpt) != nonStdTxioMap_.end())
- if(KEY_IN_MAP(outpt, nonStdTxioMap_))
- {
- if(isZeroConf)
- nonStdTxioMap_[outpt].setTxInZC(&tx, iin);
- else
- nonStdTxioMap_[outpt].setTxIn(tx.getTxRef(), iin);
- nonStdUnspentOutPoints_.erase(outpt);
- }
+ if(isZeroConf)
+ nonStdTxioMap_[outpt].setTxInZC(&tx, iin);
+ else
+ nonStdTxioMap_[outpt].setTxIn(tx.getTxRef(), iin);
+ nonStdUnspentOutPoints_.erase(outpt);
}
- } // loop over TxIns
- //}
+ }
+ } // loop over TxIns
- //for(uint32_t i=0; i -1)
- //scanNonStdTx(blknum, txIndex, tx, iout, *thisAddrPtr);
- continue;
- }
+ //if(txout.getScriptRef().find(scraddr) > -1)
+ //scanNonStdTx(blknum, txIndex, tx, iout, *thisAddrPtr);
+ continue;
+ }
- scraddr = txout.getScrAddressStr();
- addrIter = scrAddrMap_.find(scraddr);
- //if( addrIter != scrAddrMap_.end())
- if(ITER_IN_MAP(addrIter, scrAddrMap_))
+ scraddr = txout.getScrAddressStr();
+ addrIter = scrAddrMap_.find(scraddr);
+ //if( addrIter != scrAddrMap_.end())
+ if(ITER_IN_MAP(addrIter, scrAddrMap_))
+ {
+ thisAddrPtr = &addrIter->second;
+ // If we got here, at least this TxOut is for this address.
+ // But we still need to find out if it's new and update
+ // ledgers/TXIOs appropriately
+ int64_t thisVal = (int64_t)(txout.getValue());
+ totalLedgerAmt += thisVal;
+
+ OutPoint outpt(tx.getThisHash(), iout);
+ map::iterator txioIter = txioMap_.find(outpt);
+ //bool txioWasInMapAlready = (txioIter != txioMap_.end());
+ bool txioWasInMapAlready = ITER_IN_MAP(txioIter, txioMap_);
+ bool doAddLedgerEntry = false;
+ if(txioWasInMapAlready)
{
- thisAddrPtr = &addrIter->second;
- // If we got here, at least this TxOut is for this address.
- // But we still need to find out if it's new and update
- // ledgers/TXIOs appropriately
- int64_t thisVal = (int64_t)(txout.getValue());
- totalLedgerAmt += thisVal;
-
- OutPoint outpt(tx.getThisHash(), iout);
- map::iterator txioIter = txioMap_.find(outpt);
- //bool txioWasInMapAlready = (txioIter != txioMap_.end());
- bool txioWasInMapAlready = ITER_IN_MAP(txioIter, txioMap_);
- bool doAddLedgerEntry = false;
- if(txioWasInMapAlready)
+ if(isZeroConf)
{
- if(isZeroConf)
- {
- // This is a real txOut, in the blockchain
- if(txioIter->second.hasTxOutZC() || txioIter->second.hasTxOutInMain())
- continue;
-
- // If we got here, somehow the Txio existed already, but
- // there was no existing TxOut referenced by it. Probably,
- // there was, but that TxOut was invalidated due to reorg
- // and now being re-added
- txioIter->second.setTxOutZC(&tx, iout);
- txioIter->second.setValue((uint64_t)thisVal);
- thisAddrPtr->addTxIO( txioIter->second, isZeroConf);
- doAddLedgerEntry = true;
- }
- else
- {
- if(txioIter->second.hasTxOutInMain()) // ...but we already have one
- continue;
-
- // If we got here, we have an in-blockchain TxOut that is
- // replacing a zero-conf txOut. Reset the txio to have
- // only this real TxOut, blank out the ZC TxOut. And the addr
- // relevantTxIOPtrs_ does not have this yet so it needs
- // to be added (it's already part of the relevantTxIOPtrsZC_
- // but that will be removed)
- txioIter->second.setTxOut(tx.getTxRef(), iout);
- txioIter->second.setValue((uint64_t)thisVal);
- thisAddrPtr->addTxIO( txioIter->second, isZeroConf);
- doAddLedgerEntry = true;
- }
+ // This is a real txOut, in the blockchain
+ if(txioIter->second.hasTxOutZC() || txioIter->second.hasTxOutInMain())
+ continue;
+
+ // If we got here, somehow the Txio existed already, but
+ // there was no existing TxOut referenced by it. Probably,
+ // there was, but that TxOut was invalidated due to reorg
+ // and now being re-added
+ txioIter->second.setTxOutZC(&tx, iout);
+ txioIter->second.setValue((uint64_t)thisVal);
+ thisAddrPtr->addTxIO( txioIter->second, isZeroConf);
+ doAddLedgerEntry = true;
}
else
{
- // TxIO is not in the map yet -- create and add it
- TxIOPair newTxio(thisVal);
- if(isZeroConf)
- newTxio.setTxOutZC(&tx, iout);
- else
- newTxio.setTxOut(tx.getTxRef(), iout);
-
- pair toBeInserted(outpt, newTxio);
- txioIter = txioMap_.insert(toBeInserted).first;
+ if(txioIter->second.hasTxOutInMain()) // ...but we already have one
+ continue;
+
+ // If we got here, we have an in-blockchain TxOut that is
+ // replacing a zero-conf txOut. Reset the txio to have
+ // only this real TxOut, blank out the ZC TxOut. And the addr
+ // relevantTxIOPtrs_ does not have this yet so it needs
+ // to be added (it's already part of the relevantTxIOPtrsZC_
+ // but that will be removed)
+ txioIter->second.setTxOut(tx.getTxRef(), iout);
+ txioIter->second.setValue((uint64_t)thisVal);
thisAddrPtr->addTxIO( txioIter->second, isZeroConf);
doAddLedgerEntry = true;
}
+ }
+ else
+ {
+ // TxIO is not in the map yet -- create and add it
+ TxIOPair newTxio(thisVal);
+ if(isZeroConf)
+ newTxio.setTxOutZC(&tx, iout);
+ else
+ newTxio.setTxOut(tx.getTxRef(), iout);
- if(anyTxInIsOurs)
- txioIter->second.setTxOutFromSelf(true);
-
- if(isCoinbaseTx)
- txioIter->second.setFromCoinbase(true);
+ pair toBeInserted(outpt, newTxio);
+ txioIter = txioMap_.insert(toBeInserted).first;
+ thisAddrPtr->addTxIO( txioIter->second, isZeroConf);
+ doAddLedgerEntry = true;
+ }
- anyNewTxOutIsOurs = true;
- thisTxOutIsOurs[iout] = true;
+ if(anyTxInIsOurs)
+ txioIter->second.setTxOutFromSelf(true);
+
+ if(isCoinbaseTx)
+ txioIter->second.setFromCoinbase(true);
- if(doAddLedgerEntry)
- {
- LedgerEntry newLedger(scraddr,
- thisVal,
- blknum,
- tx.getThisHash(),
- iout,
- txtime,
- isCoinbaseTx, // input was coinbase/generation
- false, // sentToSelf meaningless for addr ledger
- false); // we don't actually know
- thisAddrPtr->addLedgerEntry(newLedger, isZeroConf);
- }
- // Check if this is the first time we've seen this
- if(thisAddrPtr->getFirstTimestamp() == 0)
- {
- thisAddrPtr->setFirstBlockNum( blknum );
- thisAddrPtr->setFirstTimestamp( txtime );
- }
- // Update last seen on the network
- thisAddrPtr->setLastTimestamp(txtime);
- thisAddrPtr->setLastBlockNum(blknum);
+ anyNewTxOutIsOurs = true;
+ thisTxOutIsOurs[iout] = true;
+
+ if(doAddLedgerEntry)
+ {
+ LedgerEntry newLedger(scraddr,
+ thisVal,
+ blknum,
+ tx.getThisHash(),
+ iout,
+ txtime,
+ isCoinbaseTx, // input was coinbase/generation
+ false, // sentToSelf meaningless for addr ledger
+ false); // we don't actually know
+ thisAddrPtr->addLedgerEntry(newLedger, isZeroConf);
+
+ if(!savedAsTxIn) txLedgerForComments_.push_back(newLedger);
}
- } // loop over TxOuts
+ // Check if this is the first time we've seen this
+ if(thisAddrPtr->getFirstTimestamp() == 0)
+ {
+ thisAddrPtr->setFirstBlockNum( blknum );
+ thisAddrPtr->setFirstTimestamp( txtime );
+ }
+ // Update last seen on the network
+ thisAddrPtr->setLastTimestamp(txtime);
+ thisAddrPtr->setLastBlockNum(blknum);
+ }
+ } // loop over TxOuts
- //} // loop over all wallet addresses
bool allTxOutIsOurs = true;
bool anyTxOutIsOurs = false;
@@ -1130,7 +1270,7 @@ void BtcWallet::scanTx(Tx & tx,
bool isSentToSelf = (anyTxInIsOurs && allTxOutIsOurs);
bool isChangeBack = (anyTxInIsOurs && anyTxOutIsOurs && !isSentToSelf);
- if(anyNewTxInIsOurs || anyNewTxOutIsOurs)
+ if((anyNewTxInIsOurs || anyNewTxOutIsOurs))
{
LedgerEntry le( BinaryData(0),
totalLedgerAmt,
@@ -1170,7 +1310,7 @@ LedgerEntry BtcWallet::calcLedgerEntryForTx(Tx & tx)
{
// We have the txin, now check if it contains one of our TxOuts
static OutPoint op;
- op.unserialize(txStartPtr + tx.getTxInOffset(iin));
+ op.unserialize(txStartPtr + tx.getTxInOffset(iin), tx.getSize()-tx.getTxInOffset(iin));
if(op.getTxHashRef() == BtcUtils::EmptyHash_)
isCoinbaseTx = true;
@@ -1325,7 +1465,7 @@ void BtcWallet::scanNonStdTx(uint32_t blknum,
//uint64_t BtcWallet::getBalance(bool blockchainOnly)
////////////////////////////////////////////////////////////////////////////////
-uint64_t BtcWallet::getSpendableBalance(uint32_t currBlk)
+uint64_t BtcWallet::getSpendableBalance(uint32_t currBlk, bool ignoreAllZC)
{
uint64_t balance = 0;
map::iterator iter;
@@ -1333,14 +1473,14 @@ uint64_t BtcWallet::getSpendableBalance(uint32_t currBlk)
iter != txioMap_.end();
iter++)
{
- if(iter->second.isSpendable(currBlk))
+ if(iter->second.isSpendable(currBlk, ignoreAllZC))
balance += iter->second.getValue();
}
return balance;
}
////////////////////////////////////////////////////////////////////////////////
-uint64_t BtcWallet::getUnconfirmedBalance(uint32_t currBlk)
+uint64_t BtcWallet::getUnconfirmedBalance(uint32_t currBlk, bool inclAllZC)
{
uint64_t balance = 0;
map::iterator iter;
@@ -1348,7 +1488,7 @@ uint64_t BtcWallet::getUnconfirmedBalance(uint32_t currBlk)
iter != txioMap_.end();
iter++)
{
- if(iter->second.isMineButUnconfirmed(currBlk))
+ if(iter->second.isMineButUnconfirmed(currBlk, inclAllZC))
balance += iter->second.getValue();
}
return balance;
@@ -1370,7 +1510,8 @@ uint64_t BtcWallet::getFullBalance(void)
}
////////////////////////////////////////////////////////////////////////////////
-vector BtcWallet::getSpendableTxOutList(uint32_t blkNum)
+vector BtcWallet::getSpendableTxOutList(uint32_t blkNum,
+ bool ignoreAllZC)
{
vector utxoList(0);
map::iterator iter;
@@ -1379,7 +1520,7 @@ vector BtcWallet::getSpendableTxOutList(uint32_t blkNum)
iter++)
{
TxIOPair & txio = iter->second;
- if(txio.isSpendable(blkNum))
+ if(txio.isSpendable(blkNum, ignoreAllZC))
{
TxOut txout = txio.getTxOutCopy();
utxoList.push_back(UnspentTxOut(txout, blkNum) );
@@ -1529,4676 +1670,4760 @@ vector BtcWallet::createAddressBook(void)
////////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////////
+// AddRawBlockTODB
//
-// Start BlockDataManager_LevelDB methods
+// Assumptions:
+// -- We have already determined the correct height and dup for the header
+// and we assume it's part of the sbh object
+// -- It has definitely been added to the headers DB (bail if not)
+// -- We don't know if it's been added to the blkdata DB yet
+//
+// Things to do when adding a block:
+//
+// -- PREPARATION:
+// -- Create list of all OutPoints affected, and scripts touched
+// -- If not supernode, then check above data against registeredSSHs_
+// -- Fetch all StoredTxOuts from DB about to be removed
+// -- Get/create TXHINT entries for all tx in block
+// -- Compute all script keys and get/create all StoredScriptHistory objs
+// -- Check if any multisig scripts are affected, if so get those objs
+// -- If pruning, create StoredUndoData from TxOuts about to be removed
+// -- Modify any Tx/TxOuts in the SBH tree to accommodate any tx in this
+// block that affect any other tx in this block
+//
+//
+// -- Check if the block {hgt,dup} has already been written to BLKDATA DB
+// -- Check if the header has already been added to HEADERS DB
+//
+// -- BATCH (HEADERS)
+// -- Add header to HEADHASH list
+// -- Add header to HEADHGT list
+// -- Update validDupByHeight_
+// -- Update DBINFO top block data
+//
+// -- BATCH (BLKDATA)
+// -- Modify StoredTxOut with spentness info (or prep a delete operation
+// if pruning).
+// -- Modify StoredScriptHistory objs same as above.
+// -- Modify StoredScriptHistory multisig objects as well.
+// -- Update SSH objects alreadyScannedUpToBlk_, if necessary
+// -- Write all new TXDATA entries for {hgt,dup}
+// -- If pruning, write StoredUndoData objs to DB
+// -- Update DBINFO top block data
+//
+// IMPORTANT: we also need to make sure this method does nothing if the
+// block has already been added properly (though, it okay for
+// it to take time to verify nothing needs to be done). We may
+// end up replaying some blocks to force consistency of the DB,
+// and this method needs to be robust to replaying already-added
+// blocks, as well as fixing data if the replayed block appears
+// to have been added already but is different.
//
////////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////////
-BlockDataManager_LevelDB::BlockDataManager_LevelDB(void)
-{
- Reset();
-}
-
-/////////////////////////////////////////////////////////////////////////////
-BlockDataManager_LevelDB::~BlockDataManager_LevelDB(void)
+BlockWriteBatcher::BlockWriteBatcher(InterfaceToLDB* iface)
+ : iface_(iface), dbUpdateSize_(0), mostRecentBlockApplied_(0)
{
- set::iterator iter;
- for(iter = registeredWallets_.begin();
- iter != registeredWallets_.end();
- iter++)
- {
- delete *iter;
- }
- Reset();
}
-/////////////////////////////////////////////////////////////////////////////
-// We must set the network-specific data for this blockchain
-//
-// bdm.SetBtcNetworkParams( READHEX(MAINNET_GENESIS_HASH_HEX),
-// READHEX(MAINNET_GENESIS_TX_HASH_HEX),
-// READHEX(MAINNET_MAGIC_BYTES));
-//
-// The above call will work
-void BlockDataManager_LevelDB::SetBtcNetworkParams(
- BinaryData const & GenHash,
- BinaryData const & GenTxHash,
- BinaryData const & MagicBytes)
+BlockWriteBatcher::~BlockWriteBatcher()
{
- LOGINFO << "SetBtcNetworkParams";
- GenesisHash_.copyFrom(GenHash);
- GenesisTxHash_.copyFrom(GenTxHash);
- MagicBytes_.copyFrom(MagicBytes);
+ commit();
}
-
-
-/////////////////////////////////////////////////////////////////////////////
-void BlockDataManager_LevelDB::SetHomeDirLocation(string homeDir)
+void BlockWriteBatcher::applyBlockToDB(StoredHeader &sbh)
{
- // This will eventually be used to store blocks/DB
- LOGINFO << "Set home directory: " << armoryHomeDir_.c_str();
- armoryHomeDir_ = homeDir;
- blkProgressFile_ = homeDir + string("/blkfiles.txt");
- abortLoadFile_ = homeDir + string("/abortload.txt");
-}
+ if(iface_->getValidDupIDForHeight(sbh.blockHeight_) != sbh.duplicateID_)
+ {
+ LOGERR << "Dup requested is not the main branch for the given height!";
+ return;
+ }
+ else
+ sbh.isMainBranch_ = true;
+
+ mostRecentBlockApplied_= sbh.blockHeight_;
-/////////////////////////////////////////////////////////////////////////////
-// Bitcoin-Qt/bitcoind 0.8+ changed the location and naming convention for
-// the blkXXXX.dat files. The first block file use to be:
-//
-// ~/.bitcoin/blocks/blk00000.dat
-//
-// UPDATE: Compatibility with pre-0.8 nodes removed after 6+ months and
-// a hard-fork that makes it tougher to use old versions.
-//
-bool BlockDataManager_LevelDB::SetBlkFileLocation(string blkdir)
-{
- blkFileDir_ = blkdir;
- isBlkParamsSet_ = true;
+ // We will accumulate undoData as we apply the tx
+ StoredUndoData sud;
+ sud.blockHash_ = sbh.thisHash_;
+ sud.blockHeight_ = sbh.blockHeight_;
+ sud.duplicateID_ = sbh.duplicateID_;
+
+ // Apply all the tx to the update data
+ for(map::iterator iter = sbh.stxMap_.begin();
+ iter != sbh.stxMap_.end(); iter++)
+ {
+ // This will fetch all the affected [Stored]Tx and modify the maps in
+ // RAM. It will check the maps first to see if it's already been pulled,
+ // and then it will modify either the pulled StoredTx or pre-existing
+ // one. This means that if a single Tx is affected by multiple TxIns
+ // or TxOuts, earlier changes will not be overwritten by newer changes.
+ applyTxToBatchWriteData(iter->second, &sud);
+ }
- detectAllBlkFiles();
+ // At this point we should have a list of STX and SSH with all the correct
+ // modifications (or creations) to represent this block. Let's apply it.
+ sbh.blockAppliedToDB_ = true;
+ updateBlkDataHeader(iface_, sbh);
+ //iface_->putStoredHeader(sbh, false);
- LOGINFO << "Set blkfile dir: " << blkFileDir_.c_str();
+ // we want to commit the undo data at the same time as actual changes
+ iface_->startBatch(BLKDATA);
+
+ // Now actually write all the changes to the DB all at once
+ // if we've gotten to that threshold
+ if (dbUpdateSize_ > UPDATE_BYTES_THRESH)
+ commit();
- return (numBlkFiles_!=UINT16_MAX);
+ // Only if pruning, we need to store
+ // TODO: this is going to get run every block, probably should batch it
+ // like we do with the other data...when we actually implement pruning
+ if(DBUtils.getDbPruneType() == DB_PRUNE_ALL)
+ iface_->putStoredUndoData(sud);
+
+
+ iface_->commitBatch(BLKDATA);
}
-/////////////////////////////////////////////////////////////////////////////
-void BlockDataManager_LevelDB::SetLevelDBLocation(string ldbdir)
-{
- leveldbDir_ = ldbdir;
- isLevelDBSet_ = true;
- LOGINFO << "Set leveldb dir: " << leveldbDir_.c_str();
-}
-/////////////////////////////////////////////////////////////////////////////
-void BlockDataManager_LevelDB::SelectNetwork(string netName)
+////////////////////////////////////////////////////////////////////////////////
+void BlockWriteBatcher::undoBlockFromDB(StoredUndoData & sud)
{
- if(netName.compare("Main") == 0)
+ SCOPED_TIMER("undoBlockFromDB");
+
+ StoredHeader sbh;
+ iface_->getStoredHeader(sbh, sud.blockHeight_, sud.duplicateID_);
+ if(!sbh.blockAppliedToDB_)
{
- SetBtcNetworkParams( READHEX(MAINNET_GENESIS_HASH_HEX),
- READHEX(MAINNET_GENESIS_TX_HASH_HEX),
- READHEX(MAINNET_MAGIC_BYTES) );
+ LOGERR << "This block was never applied to the DB...can't undo!";
+ return /*false*/;
}
- else if(netName.compare("Test") == 0)
+
+ mostRecentBlockApplied_ = sud.blockHeight_;
+
+ // In the future we will accommodate more user modes
+ if(DBUtils.getArmoryDbType() != ARMORY_DB_SUPER)
{
- SetBtcNetworkParams( READHEX(TESTNET_GENESIS_HASH_HEX),
- READHEX(TESTNET_GENESIS_TX_HASH_HEX),
- READHEX(TESTNET_MAGIC_BYTES) );
+ LOGERR << "Don't know what to do this in non-supernode mode!";
}
- else
- LOGERR << "ERROR: Unrecognized network name";
+
+ ///// Put the STXOs back into the DB which were removed by this block
+ // Process the stxOutsRemovedByBlock_ in reverse order
+ // Use int32_t index so that -1 != UINT32_MAX and we go into inf loop
+ for(int32_t i=sud.stxOutsRemovedByBlock_.size()-1; i>=0; i--)
+ {
+ StoredTxOut & sudStxo = sud.stxOutsRemovedByBlock_[i];
+ StoredTx * stxptr = makeSureSTXInMap(
+ iface_,
+ sudStxo.blockHeight_,
+ sudStxo.duplicateID_,
+ sudStxo.txIndex_,
+ sudStxo.parentHash_,
+ stxToModify_,
+ &dbUpdateSize_);
- isNetParamsSet_ = true;
-}
+
+ const uint16_t stxoIdx = sudStxo.txOutIndex_;
+
+ if(DBUtils.getDbPruneType() == DB_PRUNE_NONE)
+ {
+ // If full/super, we have the TxOut in DB, just need mark it unspent
+ map::iterator iter = stxptr->stxoMap_.find(stxoIdx);
+ //if(iter == stxptr->stxoMap_.end())
+ if(ITER_NOT_IN_MAP(iter, stxptr->stxoMap_))
+ {
+ LOGERR << "Expecting to find existing STXO, but DNE";
+ continue;
+ }
+
+ StoredTxOut & stxoReAdd = iter->second;
+ if(stxoReAdd.spentness_ == TXOUT_UNSPENT ||
+ stxoReAdd.spentByTxInKey_.getSize() == 0 )
+ {
+ LOGERR << "STXO needs to be re-added/marked-unspent but it";
+ LOGERR << "was already declared unspent in the DB";
+ }
+
+ stxoReAdd.spentness_ = TXOUT_UNSPENT;
+ stxoReAdd.spentByTxInKey_ = BinaryData(0);
+ }
+ else
+ {
+ // If we're pruning, we should have the Tx in the DB, but without the
+ // TxOut because it had been pruned by this block on the forward op
+ map::iterator iter = stxptr->stxoMap_.find(stxoIdx);
+ //if(iter != stxptr->stxoMap_.end())
+ if(ITER_IN_MAP(iter, stxptr->stxoMap_))
+ LOGERR << "Somehow this TxOut had not been pruned!";
+ else
+ iter->second = sudStxo;
+ iter->second.spentness_ = TXOUT_UNSPENT;
+ iter->second.spentByTxInKey_ = BinaryData(0);
+ }
-/////////////////////////////////////////////////////////////////////////////
-bool BlockDataManager_LevelDB::checkLdbStatus(leveldb::Status stat)
-{
- if( stat.ok() )
- return true;
+ {
+ ////// Finished updating STX, now update the SSH in the DB
+ // Updating the SSH objects works the same regardless of pruning
+ map::iterator iter = stxptr->stxoMap_.find(stxoIdx);
+ //if(iter == stxptr->stxoMap_.end())
+ if(ITER_NOT_IN_MAP(iter, stxptr->stxoMap_))
+ {
+ LOGERR << "Somehow STXO DNE even though we should've just added it!";
+ continue;
+ }
- LOGERR << "***LevelDB Error: " << stat.ToString();
- return false;
-}
+ StoredTxOut & stxoReAdd = iter->second;
+ BinaryData uniqKey = stxoReAdd.getScrAddress();
+ BinaryData hgtX = stxoReAdd.getHgtX();
+ StoredScriptHistory* sshptr = makeSureSSHInMap(
+ iface_, uniqKey, hgtX, sshToModify_, &dbUpdateSize_
+ );
+ if(sshptr==NULL)
+ {
+ LOGERR << "No SSH found for marking TxOut unspent on undo";
+ continue;
+ }
-//////////////////////////////////////////////////////////////////////////
-// This method opens the databases, and figures out up to what block each
-// of them is sync'd to. Then it figures out where that corresponds in
-// the blk*.dat files, so that it can pick up where it left off. You can
-// use the last argument to specify an approximate amount of blocks
-// (specified in bytes) that you would like to replay: i.e. if 10 MB,
-// startScanBlkFile_ and endOfLastBlockByte_ variables will be set to
-// the first block that is approximately 10 MB behind your latest block.
-// Then you can pick up from there and let the DB clean up any mess that
-// was left from an unclean shutdown.
-bool BlockDataManager_LevelDB::initializeDBInterface(ARMORY_DB_TYPE dbtype,
- DB_PRUNE_TYPE prtype)
-{
- SCOPED_TIMER("initializeDBInterface");
- if(!isBlkParamsSet_ || !isLevelDBSet_)
- {
- LOGERR << "Cannot sync DB until blkfile and LevelDB paths are set. ";
- return false;
+ // Now get the TxIOPair in the StoredScriptHistory and mark unspent
+ sshptr->markTxOutUnspent(stxoReAdd.getDBKey(false),
+ stxoReAdd.getValue(),
+ stxoReAdd.isCoinbase_,
+ false);
+
+
+ // If multisig, we need to update the SSHs for individual addresses
+ if(uniqKey[0] == SCRIPT_PREFIX_MULTISIG)
+ {
+ vector addr160List;
+ BtcUtils::getMultisigAddrList(stxoReAdd.getScriptRef(), addr160List);
+ for(uint32_t a=0; amarkTxOutUnspent(stxoReAdd.getDBKey(false),
+ stxoReAdd.getValue(),
+ stxoReAdd.isCoinbase_,
+ true);
+ }
+ }
+ }
}
- if(iface_->databasesAreOpen())
+
+ // The OutPoint list is every new, unspent TxOut created by this block.
+ // When they were added, we updated all the StoredScriptHistory objects
+ // to include references to them. We need to remove them now.
+ // Use int32_t index so that -1 != UINT32_MAX and we go into inf loop
+ for(int16_t itx=sbh.numTx_-1; itx>=0; itx--)
{
- LOGERR << "Attempted to initialize a database that was already open";
- return false;
- }
+ // Ironically, even though I'm using hgt & dup, I still need the hash
+ // in order to key the stxToModify map
+ BinaryData txHash = iface_->getHashForDBKey(sbh.blockHeight_,
+ sbh.duplicateID_,
+ itx);
+ StoredTx * stxptr = makeSureSTXInMap(
+ iface_,
+ sbh.blockHeight_,
+ sbh.duplicateID_,
+ itx,
+ txHash,
+ stxToModify_,
+ &dbUpdateSize_);
- bool openWithErr = iface_->openDatabases(leveldbDir_,
- GenesisHash_,
- GenesisTxHash_,
- MagicBytes_,
- dbtype,
- prtype);
+ for(int16_t txoIdx = stxptr->stxoMap_.size()-1; txoIdx >= 0; txoIdx--)
+ {
- return openWithErr;
+ StoredTxOut & stxo = stxptr->stxoMap_[txoIdx];
+ BinaryData stxoKey = stxo.getDBKey(false);
+
+
+ // Then fetch the StoredScriptHistory of the StoredTxOut scraddress
+ BinaryData uniqKey = stxo.getScrAddress();
+ BinaryData hgtX = stxo.getHgtX();
+ StoredScriptHistory * sshptr = makeSureSSHInMap(
+ iface_, uniqKey,
+ hgtX,
+ sshToModify_,
+ &dbUpdateSize_,
+ false);
+
+
+ // If we are tracking that SSH, remove the reference to this OutPoint
+ if(sshptr != NULL)
+ sshptr->eraseTxio(stxoKey);
+
+ // Now remove any multisig entries that were added due to this TxOut
+ if(uniqKey[0] == SCRIPT_PREFIX_MULTISIG)
+ {
+ vector addr160List;
+ BtcUtils::getMultisigAddrList(stxo.getScriptRef(), addr160List);
+ for(uint32_t a=0; aeraseTxio(stxoKey);
+ }
+ }
+ }
+ }
+
+ // Finally, mark this block as UNapplied.
+ sbh.blockAppliedToDB_ = false;
+ updateBlkDataHeader(iface_, sbh);
+
+ if (dbUpdateSize_ > UPDATE_BYTES_THRESH)
+ commit();
}
+
////////////////////////////////////////////////////////////////////////////////
-bool BlockDataManager_LevelDB::detectCurrentSyncState(
- bool forceRebuild,
- bool initialLoad)
+// Assume that stx.blockHeight_ and .duplicateID_ are set correctly.
+// We created the maps and sets outside this function, because we need to keep
+// a master list of updates induced by all tx in this block.
+// TODO: Make sure that if Tx5 spends an input from Tx2 in the same
+// block that it is handled correctly, etc.
+bool BlockWriteBatcher::applyTxToBatchWriteData(
+ StoredTx & thisSTX,
+ StoredUndoData * sud)
{
- // Make sure we detected all the available blk files
- detectAllBlkFiles();
- vector firstHashes = getFirstHashOfEachBlkFile();
- LOGINFO << "Total blk*.dat files: " << numBlkFiles_;
+ SCOPED_TIMER("applyTxToBatchWriteData");
- if(!iface_->databasesAreOpen())
- {
- LOGERR << "Could not open databases!";
- return false;
- }
-
- // We add 1 to each of these, since we always use exclusive upperbound
- startHeaderHgt_ = getTopBlockHeightInDB(HEADERS) + 1;
- startRawBlkHgt_ = getTopBlockHeightInDB(BLKDATA) + 1;
- startApplyHgt_ = getAppliedToHeightInDB() + 1;
-
- // If the values were supposed to be zero, they'll get set to 1. Fix it
- startHeaderHgt_ -= (startHeaderHgt_==1 ? 1 : 0);
- startRawBlkHgt_ -= (startRawBlkHgt_==1 ? 1 : 0);
- startApplyHgt_ -= (startApplyHgt_ ==1 ? 1 : 0);
+ Tx tx = thisSTX.getTxCopy();
- LOGINFO << "Current Top block in HEADERS DB: " << startHeaderHgt_;
- LOGINFO << "Current Top block in BLKDATA DB: " << startRawBlkHgt_;
- LOGINFO << "Current Applied blocks up to hgt: " << startApplyHgt_;
+ // We never expect thisSTX to already be in the map (other tx in the map
+ // may be affected/retrieved multiple times).
+ if(KEY_IN_MAP(tx.getThisHash(), stxToModify_))
+ LOGERR << "How did we already add this tx?";
- if(startHeaderHgt_ == 0 || forceRebuild)
- {
- if(forceRebuild)
- LOGINFO << "Ignore existing sync state, rebuilding databases";
+ // I just noticed we never set TxOuts to TXOUT_UNSPENT. Might as well do
+ // it here -- by definition if we just added this Tx to the DB, it couldn't
+ // have been spent yet.
+
+ for(map::iterator iter = thisSTX.stxoMap_.begin();
+ iter != thisSTX.stxoMap_.end();
+ iter++)
+ iter->second.spentness_ = TXOUT_UNSPENT;
- startHeaderHgt_ = 0;
- startHeaderBlkFile_ = 0;
- startHeaderOffset_ = 0;
- startRawBlkHgt_ = 0;
- startRawBlkFile_ = 0;
- startRawOffset_ = 0;
- startApplyHgt_ = 0;
- startApplyBlkFile_ = 0;
- startApplyOffset_ = 0;
- headerMap_.clear();
- topBlockPtr_ = NULL;
- genBlockPtr_ = NULL;
- lastTopBlock_ = UINT32_MAX;;
- return true;
- }
+ // This tx itself needs to be added to the map, which makes it accessible
+ // to future tx in the same block which spend outputs from this tx, without
+ // doing anything crazy in the code here
+ stxToModify_[tx.getThisHash()] = thisSTX;
- // This fetches the header data from the DB
- if(!initialLoad)
+ dbUpdateSize_ += thisSTX.numBytes_;
+
+ // Go through and find all the previous TxOuts that are affected by this tx
+ for(uint32_t iin=0; iin sbhMap;
- headerMap_.clear();
- iface_->readAllHeaders(headerMap_, sbhMap);
-
+ TxIn txin = tx.getTxInCopy(iin);
+ if(txin.isCoinbase())
+ continue;
- // Organize them into the longest chain
- organizeChain(true); // true ~ force rebuild
+ // Get the OutPoint data of TxOut being spent
+ const OutPoint op = txin.getOutPoint();
+ const BinaryDataRef opTxHash = op.getTxHashRef();
+ const uint32_t opTxoIdx = op.getTxOutIndex();
+ // This will fetch the STX from DB and put it in the stxToModify
+ // map if it's not already there. Or it will do nothing if it's
+ // already part of the map. In both cases, it returns a pointer
+ // to the STX that will be written to DB that we can modify.
+ StoredTx * stxptr = makeSureSTXInMap(iface_, opTxHash, stxToModify_, &dbUpdateSize_);
+ StoredTxOut & stxo = stxptr->stxoMap_[opTxoIdx];
+ BinaryData uniqKey = stxo.getScrAddress();
- // If the headers DB ended up corrupted (triggered by organizeChain),
- // then nuke and rebuild the headers
- if(corruptHeadersDB_)
- {
- LOGERR << "Corrupted headers DB!";
- startHeaderHgt_ = 0;
- startHeaderBlkFile_ = 0;
- startHeaderOffset_ = 0;
- startRawBlkHgt_ = 0;
- startRawBlkFile_ = 0;
- startRawOffset_ = 0;
- startApplyHgt_ = 0;
- startApplyBlkFile_ = 0;
- startApplyOffset_ = 0;
- headerMap_.clear();
- headersByHeight_.clear();
- topBlockPtr_ = NULL;
- prevTopBlockPtr_ = NULL;
- corruptHeadersDB_ = false;
- lastTopBlock_ = UINT32_MAX;
- genBlockPtr_ = NULL;
- return true;
- }
- else
- {
- // Now go through the linear list of main-chain headers, mark valid
- for(uint32_t i=0; i::iterator iter = stxptr->stxoMap_.find(opTxoIdx);
+
+ // Some sanity checks
+ //if(iter == stxptr->stxoMap_.end())
+ if(ITER_NOT_IN_MAP(iter, stxptr->stxoMap_))
{
- BinaryDataRef headHash = headersByHeight_[i]->getThisHashRef();
- StoredHeader & sbh = sbhMap[headHash];
- sbh.isMainBranch_ = true;
- iface_->setValidDupIDForHeight(sbh.blockHeight_, sbh.duplicateID_);
+ LOGERR << "Needed to get OutPoint for a TxIn, but DNE";
+ continue;
}
- // startHeaderBlkFile_/Offset_ is where we were before the last shutdown
- for(startHeaderBlkFile_ = 0;
- startHeaderBlkFile_ < firstHashes.size();
- startHeaderBlkFile_++)
+ // We're aliasing this because "iter->second" is not clear at all
+ StoredTxOut & stxoSpend = iter->second;
+
+ if(stxoSpend.spentness_ == TXOUT_SPENT)
{
- // hasHeaderWithHash is probing the RAM block headers we just organized
- if(!hasHeaderWithHash(firstHashes[startHeaderBlkFile_]))
- break;
+ LOGERR << "Trying to mark TxOut spent, but it's already marked";
+ continue;
}
- // If no new blkfiles since last load, the above loop ends w/o "break"
- // If it's zero, then we don't have anything, start at zero
- // If new blk file, then startHeaderBlkFile_ is at the first blk file
- // with an unrecognized hash... we must've left off in the prev blkfile
- if(startHeaderBlkFile_ > 0)
- startHeaderBlkFile_--;
-
- startHeaderOffset_ = findOffsetFirstUnrecognized(startHeaderBlkFile_);
- }
+ // Just about to {remove-if-pruning, mark-spent-if-not} STXO
+ // Record it in the StoredUndoData object
+ if(sud != NULL)
+ sud->stxOutsRemovedByBlock_.push_back(stxoSpend);
- LOGINFO << "First unrecognized hash file: " << startHeaderBlkFile_;
- LOGINFO << "Offset of first unrecog block: " << startHeaderOffset_;
+ // Need to modify existing UTXOs, so that we can delete or mark as spent
+ stxoSpend.spentness_ = TXOUT_SPENT;
+ stxoSpend.spentByTxInKey_ = thisSTX.getDBKeyOfChild(iin, false);
+ if(DBUtils.getArmoryDbType() != ARMORY_DB_SUPER)
+ {
+ LOGERR << "Don't know what to do this in non-supernode mode!";
+ }
- // Note that startRawBlkHgt_ is topBlk+1, so this return where we should
- // actually start processing raw blocks, not the last one we processed
- pair rawBlockLoc;
- rawBlockLoc = findFileAndOffsetForHgt(startRawBlkHgt_, &firstHashes);
- startRawBlkFile_ = rawBlockLoc.first;
- startRawOffset_ = rawBlockLoc.second;
- LOGINFO << "First blkfile not in DB: " << startRawBlkFile_;
- LOGINFO << "Location of first block not in DB: " << startRawOffset_;
+ ////// Now update the SSH to show this TxIOPair was spent
+ // Same story as stxToModify above, except this will actually create a new
+ // SSH if it doesn't exist in the map or the DB
+ BinaryData hgtX = stxo.getHgtX();
+ StoredScriptHistory* sshptr = makeSureSSHInMap(
+ iface_,
+ uniqKey,
+ hgtX,
+ sshToModify_,
+ &dbUpdateSize_
+ );
- if(DBUtils.getArmoryDbType() != ARMORY_DB_BARE)
- {
- // TODO: finish this
- findFirstUnappliedBlock();
- LOGINFO << "Blkfile of first unapplied block: " << startApplyBlkFile_;
- LOGINFO << "Location of first unapplied block: " << startApplyOffset_;
+ // Assuming supernode, we don't need to worry about removing references
+ // to multisig scripts that reference this script. Simply find and
+ // update the correct SSH TXIO directly
+ sshptr->markTxOutSpent(stxoSpend.getDBKey(false),
+ thisSTX.getDBKeyOfChild(iin, false));
}
- // If we're content here, just return
- return true;
-
- /*
- // If we want to replay some blocks, we need to adjust startScanBlkFile_
- // and startScanOffset_ to be approx "replayNBytes" behind where
- // they are currently set.
- int32_t targOffset = (int32_t)startScanOffset_ - (int32_t)replayNBytes;
- if(targOffset > 0 || startScanBlkFile_==0)
- {
- targOffset = max(0, targOffset);
- startScanOffset_ = findFirstBlkApproxOffset(startScanBlkFile_, targOffset);
- }
- else
+ // We don't need to update any TXDATA, since it is part of writing thisSTX
+ // to the DB ... but we do need to update the StoredScriptHistory objects
+ // with references to the new [unspent] TxOuts
+ for(uint32_t iout=0; ioutmarkTxOutUnspent(stxoToAdd.getDBKey(false),
+ stxoToAdd.getValue(),
+ stxoToAdd.isCoinbase_,
+ false);
+
+ // If this was a multisig address, add a ref to each individual scraddr
+ if(uniqKey[0] == SCRIPT_PREFIX_MULTISIG)
+ {
+ vector addr160List;
+ BtcUtils::getMultisigAddrList(stxoToAdd.getScriptRef(), addr160List);
+ for(uint32_t a=0; amarkTxOutUnspent(stxoToAdd.getDBKey(false),
+ stxoToAdd.getValue(),
+ stxoToAdd.isCoinbase_,
+ true);
+ }
+ }
}
- LOGINFO << "Rewinding start block to enforce DB integrity";
- LOGINFO << "Start at blockfile: " << startScanBlkFile_;
- LOGINFO << "Start location in above blkfile: " << startScanOffset_;
return true;
- */
}
-////////////////////////////////////////////////////////////////////////////////
-vector BlockDataManager_LevelDB::getFirstHashOfEachBlkFile(void) const
+
+void BlockWriteBatcher::commit()
{
- if(!isBlkParamsSet_)
+ // Check for any SSH objects that are now completely empty. If they exist,
+ // they should be removed from the DB, instead of simply written as empty
+ // objects
+ const set keysToDelete = searchForSSHKeysToDelete();
+
+ iface_->startBatch(BLKDATA);
+
+ for(map::iterator iter_stx = stxToModify_.begin();
+ iter_stx != stxToModify_.end();
+ iter_stx++)
{
- LOGERR << "Can't get blk files until blkfile params are set";
- return vector(0);
+ iface_->putStoredTx(iter_stx->second, true);
+ }
+
+ for(map::iterator iter_ssh = sshToModify_.begin();
+ iter_ssh != sshToModify_.end();
+ iter_ssh++)
+ {
+ iface_->putStoredScriptHistory(iter_ssh->second);
}
- uint32_t nFile = (uint32_t)blkFileList_.size();
- BinaryData magic(4), szstr(4), rawHead(HEADER_SIZE);
- vector headHashes(nFile);
- for(uint32_t f=0; f::const_iterator iter_del = keysToDelete.begin();
+ iter_del != keysToDelete.end();
+ iter_del++)
{
- ifstream is(blkFileList_[f].c_str(), ios::in|ios::binary);
- is.seekg(0, ios::end);
- size_t filesize = (size_t)is.tellg();
- is.seekg(0, ios::beg);
- if(filesize < 88)
+ iface_->deleteValue(BLKDATA, *iter_del);
+ }
+
+
+ if(mostRecentBlockApplied_ != 0)
+ {
+ StoredDBInfo sdbi;
+ iface_->getStoredDBInfo(BLKDATA, sdbi);
+ if(!sdbi.isInitialized())
+ LOGERR << "How do we have invalid SDBI in applyMods?";
+ else
{
- is.close();
- LOGERR << "File: " << blkFileList_[f] << " is less than 88 bytes!";
- continue;
+ sdbi.appliedToHgt_ = mostRecentBlockApplied_;
+ iface_->putStoredDBInfo(BLKDATA, sdbi);
}
+ }
- is.read((char*)magic.getPtr(), 4);
- is.read((char*)szstr.getPtr(), 4);
- if(magic != MagicBytes_)
+ iface_->commitBatch(BLKDATA);
+
+ stxToModify_.clear();
+ sshToModify_.clear();
+ dbUpdateSize_ = 0;
+}
+
+set BlockWriteBatcher::searchForSSHKeysToDelete()
+{
+ set keysToDelete;
+ vector fullSSHToDelete;
+
+ for(map::iterator iterSSH = sshToModify_.begin();
+ iterSSH != sshToModify_.end(); )
+ {
+ // get our next one in case we delete the current
+ map::iterator nextSSHi = iterSSH;
+ ++nextSSHi;
+
+ StoredScriptHistory & ssh = iterSSH->second;
+
+ for(map::iterator iterSub = ssh.subHistMap_.begin();
+ iterSub != ssh.subHistMap_.end();
+ iterSub++)
{
- is.close();
- LOGERR << "Magic bytes mismatch. Block file is for another network!";
- return vector(0);
+ StoredSubHistory & subssh = iterSub->second;
+ if(subssh.txioSet_.size() == 0)
+ keysToDelete.insert(subssh.getDBKey(true));
+ }
+
+ // If the full SSH is empty (not just sub history), mark it to be removed
+ if(iterSSH->second.totalTxioCount_ == 0)
+ {
+ sshToModify_.erase(iterSSH);
}
- is.read((char*)rawHead.getPtr(), HEADER_SIZE);
- headHashes[f] = BinaryData(32);
- BtcUtils::getHash256(rawHead, headHashes[f]);
- is.close();
+ iterSSH = nextSSHi;
}
- return headHashes;
+
+ return keysToDelete;
}
////////////////////////////////////////////////////////////////////////////////
-uint32_t BlockDataManager_LevelDB::findOffsetFirstUnrecognized(uint32_t fnum)
+////////////////////////////////////////////////////////////////////////////////
+//
+// Start BlockDataManager_LevelDB methods
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+BlockDataManager_LevelDB::BlockDataManager_LevelDB(void)
{
- uint32_t loc = 0;
- BinaryData magic(4), szstr(4), rawHead(80), hashResult(32);
+ Reset();
+}
- ifstream is(blkFileList_[fnum].c_str(), ios::in|ios::binary);
- while(!is.eof())
+/////////////////////////////////////////////////////////////////////////////
+BlockDataManager_LevelDB::~BlockDataManager_LevelDB(void)
+{
+ set::iterator iter;
+ for(iter = registeredWallets_.begin();
+ iter != registeredWallets_.end();
+ iter++)
{
- is.read((char*)magic.getPtr(), 4);
- if(is.eof()) break;
-
-
- // This is not an error, it just simply hit the padding
- if(magic!=MagicBytes_)
- break;
+ delete *iter;
+ }
- is.read((char*)szstr.getPtr(), 4);
- uint32_t blksize = READ_UINT32_LE(szstr.getPtr());
- if(is.eof()) break;
+ Reset();
+}
- is.read((char*)rawHead.getPtr(), HEADER_SIZE);
+/////////////////////////////////////////////////////////////////////////////
+// We must set the network-specific data for this blockchain
+//
+// bdm.SetBtcNetworkParams( READHEX(MAINNET_GENESIS_HASH_HEX),
+// READHEX(MAINNET_GENESIS_TX_HASH_HEX),
+// READHEX(MAINNET_MAGIC_BYTES));
+//
+// The above call will work
+void BlockDataManager_LevelDB::SetBtcNetworkParams(
+ BinaryData const & GenHash,
+ BinaryData const & GenTxHash,
+ BinaryData const & MagicBytes)
+{
+ LOGINFO << "SetBtcNetworkParams";
+ GenesisHash_.copyFrom(GenHash);
+ GenesisTxHash_.copyFrom(GenTxHash);
+ MagicBytes_.copyFrom(MagicBytes);
+}
- BtcUtils::getHash256_NoSafetyCheck(rawHead.getPtr(), HEADER_SIZE, hashResult);
- if(getHeaderByHash(hashResult) == NULL)
- break; // first hash in the file that isn't in our header map
- loc += blksize + 8;
- is.seekg(blksize - HEADER_SIZE, ios::cur);
- }
-
- is.close();
- return loc;
+/////////////////////////////////////////////////////////////////////////////
+void BlockDataManager_LevelDB::SetHomeDirLocation(string homeDir)
+{
+ // This will eventually be used to store blocks/DB
+ LOGINFO << "Set home directory: " << armoryHomeDir_.c_str();
+ armoryHomeDir_ = homeDir;
+ blkProgressFile_ = homeDir + string("/blkfiles.txt");
+ abortLoadFile_ = homeDir + string("/abortload.txt");
}
-////////////////////////////////////////////////////////////////////////////////
-uint32_t BlockDataManager_LevelDB::findFirstBlkApproxOffset(uint32_t fnum,
- uint32_t offset) const
+/////////////////////////////////////////////////////////////////////////////
+// Bitcoin-Qt/bitcoind 0.8+ changed the location and naming convention for
+// the blkXXXX.dat files. The first block file use to be:
+//
+// ~/.bitcoin/blocks/blk00000.dat
+//
+// UPDATE: Compatibility with pre-0.8 nodes removed after 6+ months and
+// a hard-fork that makes it tougher to use old versions.
+//
+bool BlockDataManager_LevelDB::SetBlkFileLocation(string blkdir)
{
- if(fnum >= numBlkFiles_)
- {
- LOGERR << "Blkfile number out of range! (" << fnum << ")";
- return UINT32_MAX;
- }
-
- uint32_t loc = 0;
- BinaryData magic(4), szstr(4), rawHead(80), hashResult(32);
- ifstream is(blkFileList_[fnum].c_str(), ios::in|ios::binary);
- while(!is.eof() && loc <= offset)
- {
- is.read((char*)magic.getPtr(), 4);
- if(is.eof()) break;
- if(magic!=MagicBytes_)
- return UINT32_MAX;
+ blkFileDir_ = blkdir;
+ isBlkParamsSet_ = true;
- is.read((char*)szstr.getPtr(), 4);
- uint32_t blksize = READ_UINT32_LE(szstr.getPtr());
- if(is.eof()) break;
+ detectAllBlkFiles();
- loc += blksize + 8;
- is.seekg(blksize, ios::cur);
- }
+ LOGINFO << "Set blkfile dir: " << blkFileDir_.c_str();
- is.close();
- return loc;
+ return (numBlkFiles_!=UINT16_MAX);
}
-////////////////////////////////////////////////////////////////////////////////
-pair BlockDataManager_LevelDB::findFileAndOffsetForHgt(
- uint32_t hgt,
- vector * firstHashes)
+/////////////////////////////////////////////////////////////////////////////
+void BlockDataManager_LevelDB::SetLevelDBLocation(string ldbdir)
{
- vector recomputedHashes;
- if(firstHashes==NULL)
- {
- recomputedHashes = getFirstHashOfEachBlkFile();
- firstHashes = &recomputedHashes;
- }
+ leveldbDir_ = ldbdir;
+ isLevelDBSet_ = true;
+ LOGINFO << "Set leveldb dir: " << leveldbDir_.c_str();
+}
- pair outPair;
- int32_t blkfile;
- for(blkfile = 0; blkfile < (int32_t)firstHashes->size(); blkfile++)
+/////////////////////////////////////////////////////////////////////////////
+void BlockDataManager_LevelDB::SelectNetwork(string netName)
+{
+ if(netName.compare("Main") == 0)
{
- BlockHeader * bhptr = getHeaderByHash((*firstHashes)[blkfile]);
- if(bhptr == NULL)
- break;
-
- if(bhptr->getBlockHeight() > hgt)
- break;
+ SetBtcNetworkParams( READHEX(MAINNET_GENESIS_HASH_HEX),
+ READHEX(MAINNET_GENESIS_TX_HASH_HEX),
+ READHEX(MAINNET_MAGIC_BYTES) );
}
-
- blkfile = max(blkfile-1, 0);
- if(blkfile >= (int32_t)numBlkFiles_)
+ else if(netName.compare("Test") == 0)
{
- LOGERR << "Blkfile number out of range! (" << blkfile << ")";
- return outPair;
+ SetBtcNetworkParams( READHEX(TESTNET_GENESIS_HASH_HEX),
+ READHEX(TESTNET_GENESIS_TX_HASH_HEX),
+ READHEX(TESTNET_MAGIC_BYTES) );
}
+ else
+ LOGERR << "ERROR: Unrecognized network name";
- uint32_t loc = 0;
- BinaryData magic(4), szstr(4), rawHead(HEADER_SIZE), hashResult(32);
- ifstream is(blkFileList_[blkfile].c_str(), ios::in|ios::binary);
- while(!is.eof())
- {
- is.read((char*)magic.getPtr(), 4);
- if(is.eof()) break;
- if(magic!=MagicBytes_)
- break;
-
- is.read((char*)szstr.getPtr(), 4);
- uint32_t blksize = READ_UINT32_LE(szstr.getPtr());
- if(is.eof()) break;
-
- is.read((char*)rawHead.getPtr(), HEADER_SIZE);
- BtcUtils::getHash256_NoSafetyCheck(rawHead.getPtr(),
- HEADER_SIZE,
- hashResult);
-
- BlockHeader * bhptr = getHeaderByHash(hashResult);
- if(bhptr == NULL)
- break;
-
- if(bhptr->getBlockHeight() >= hgt)
- break;
+ isNetParamsSet_ = true;
+}
- loc += blksize + 8;
- is.seekg(blksize - HEADER_SIZE, ios::cur);
- }
- is.close();
- outPair.first = blkfile;
- outPair.second = loc;
-
- return outPair;
-
+/////////////////////////////////////////////////////////////////////////////
+bool BlockDataManager_LevelDB::checkLdbStatus(leveldb::Status stat)
+{
+ if( stat.ok() )
+ return true;
+ LOGERR << "***LevelDB Error: " << stat.ToString();
+ return false;
}
-
-////////////////////////////////////////////////////////////////////////////////
-// This behaves very much like the algorithm for finding the branch point
-// in the header tree with a peer.
-uint32_t BlockDataManager_LevelDB::findFirstUnappliedBlock(void)
+//////////////////////////////////////////////////////////////////////////
+// This method opens the databases, and figures out up to what block each
+// of them is sync'd to. Then it figures out where that corresponds in
+// the blk*.dat files, so that it can pick up where it left off. You can
+// use the last argument to specify an approximate amount of blocks
+// (specified in bytes) that you would like to replay: i.e. if 10 MB,
+// startScanBlkFile_ and endOfLastBlockByte_ variables will be set to
+// the first block that is approximately 10 MB behind your latest block.
+// Then you can pick up from there and let the DB clean up any mess that
+// was left from an unclean shutdown.
+bool BlockDataManager_LevelDB::initializeDBInterface(ARMORY_DB_TYPE dbtype,
+ DB_PRUNE_TYPE prtype)
{
- SCOPED_TIMER("findFirstUnappliedBlock");
-
- if(!iface_->databasesAreOpen())
+ SCOPED_TIMER("initializeDBInterface");
+ if(!isBlkParamsSet_ || !isLevelDBSet_)
{
- LOGERR << "Database is not open!";
- return UINT32_MAX;
+ LOGERR << "Cannot sync DB until blkfile and LevelDB paths are set. ";
+ return false;
}
-
- int32_t blkCheck = (int32_t)getTopBlockHeightInDB(BLKDATA);
-
- StoredHeader sbh;
- uint32_t toSub = 0;
- uint32_t nIter = 0;
- do
- {
- blkCheck -= toSub;
- if(blkCheck < 0)
- {
- blkCheck = 0;
- break;
- }
-
- iface_->getStoredHeader(sbh, (uint32_t)blkCheck);
-
- if(nIter++ < 10)
- toSub += 1; // we get some N^2 action here (for the first 10 iter)
- else
- toSub = (uint32_t)(1.5*toSub); // after that, increase exponentially
-
- } while(!sbh.blockAppliedToDB_);
- // We likely overshot in the last loop, so walk forward until we get to it.
- do
+ if(iface_->databasesAreOpen())
{
- iface_->getStoredHeader(sbh, (uint32_t)blkCheck);
- blkCheck += 1;
- } while(sbh.blockAppliedToDB_);
+ LOGERR << "Attempted to initialize a database that was already open";
+ return false;
+ }
- return (uint32_t)blkCheck;
-}
-////////////////////////////////////////////////////////////////////////////////
-uint32_t BlockDataManager_LevelDB::getTopBlockHeightInDB(DB_SELECT db)
-{
- StoredDBInfo sdbi;
- iface_->getStoredDBInfo(db, sdbi, false);
- return sdbi.topBlkHgt_;
-}
+ bool openWithErr = iface_->openDatabases(leveldbDir_,
+ GenesisHash_,
+ GenesisTxHash_,
+ MagicBytes_,
+ dbtype,
+ prtype);
-////////////////////////////////////////////////////////////////////////////////
-uint32_t BlockDataManager_LevelDB::getAppliedToHeightInDB(void)
-{
- StoredDBInfo sdbi;
- iface_->getStoredDBInfo(BLKDATA, sdbi, false);
- return sdbi.appliedToHgt_;
+ return openWithErr;
}
////////////////////////////////////////////////////////////////////////////////
-// The name of this function reflects that we are going to implement headers-
-// first "verification." Rather, we are going to organize the chain of headers
-// before we add any blocks, and then only add blocks that are on the main
-// chain. Return false if these headers induced a reorg.
-bool BlockDataManager_LevelDB::addHeadersFirst(BinaryDataRef rawHeader)
+bool BlockDataManager_LevelDB::detectCurrentSyncState(
+ bool forceRebuild,
+ bool initialLoad)
{
- vector toAdd(1);
- toAdd[0].unserialize(rawHeader);
- return addHeadersFirst(toAdd);
-}
+ // Make sure we detected all the available blk files
+ detectAllBlkFiles();
+ vector firstHashes = getFirstHashOfEachBlkFile();
+ LOGINFO << "Total blk*.dat files: " << numBlkFiles_;
-////////////////////////////////////////////////////////////////////////////////
-// Add the headers to the DB, which is required before putting raw blocks.
-// Can only put raw blocks when we know their height and dupID. After we
-// put the headers, then we put raw blocks. Then we go through the valid
-// headers and applyToDB the raw blocks.
-bool BlockDataManager_LevelDB::addHeadersFirst(vector const & headVect)
-{
- vector headersToDB;
- headersToDB.reserve(headVect.size());
- for(uint32_t h=0; hdatabasesAreOpen())
{
- pair bhInputPair;
- pair