Branch data Line data Source code
# 1 : : // Copyright (c) 2009-2010 Satoshi Nakamoto
# 2 : : // Copyright (c) 2009-2020 The Bitcoin Core developers
# 3 : : // Distributed under the MIT software license, see the accompanying
# 4 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
# 5 : :
# 6 : : #include <validation.h>
# 7 : :
# 8 : : #include <arith_uint256.h>
# 9 : : #include <chain.h>
# 10 : : #include <chainparams.h>
# 11 : : #include <checkqueue.h>
# 12 : : #include <consensus/consensus.h>
# 13 : : #include <consensus/merkle.h>
# 14 : : #include <consensus/tx_check.h>
# 15 : : #include <consensus/tx_verify.h>
# 16 : : #include <consensus/validation.h>
# 17 : : #include <cuckoocache.h>
# 18 : : #include <flatfile.h>
# 19 : : #include <hash.h>
# 20 : : #include <index/blockfilterindex.h>
# 21 : : #include <index/txindex.h>
# 22 : : #include <logging.h>
# 23 : : #include <logging/timer.h>
# 24 : : #include <node/blockstorage.h>
# 25 : : #include <node/coinstats.h>
# 26 : : #include <node/ui_interface.h>
# 27 : : #include <policy/policy.h>
# 28 : : #include <policy/settings.h>
# 29 : : #include <pow.h>
# 30 : : #include <primitives/block.h>
# 31 : : #include <primitives/transaction.h>
# 32 : : #include <random.h>
# 33 : : #include <reverse_iterator.h>
# 34 : : #include <script/script.h>
# 35 : : #include <script/sigcache.h>
# 36 : : #include <shutdown.h>
# 37 : : #include <signet.h>
# 38 : : #include <timedata.h>
# 39 : : #include <tinyformat.h>
# 40 : : #include <txdb.h>
# 41 : : #include <txmempool.h>
# 42 : : #include <uint256.h>
# 43 : : #include <undo.h>
# 44 : : #include <util/check.h> // For NDEBUG compile time check
# 45 : : #include <util/hasher.h>
# 46 : : #include <util/moneystr.h>
# 47 : : #include <util/rbf.h>
# 48 : : #include <util/strencodings.h>
# 49 : : #include <util/system.h>
# 50 : : #include <util/translation.h>
# 51 : : #include <validationinterface.h>
# 52 : : #include <warnings.h>
# 53 : :
# 54 : : #include <numeric>
# 55 : : #include <optional>
# 56 : : #include <string>
# 57 : :
# 58 : : #include <boost/algorithm/string/replace.hpp>
# 59 : :
/** Scaling factors for timing log output: one millionth / one thousandth. */
#define MICRO 0.000001
#define MILLI 0.001

/**
 * An extra transaction can be added to a package, as long as it only has one
 * ancestor and is no larger than this. Not really any reason to make this
 * configurable as it doesn't materially change DoS parameters.
 */
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
/** Maximum age of our tip for us to be considered current for fee estimation */
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
/** Human-readable descriptions of the chain verification levels, one per level;
 *  presumably surfaced in -checklevel help text — confirm at call sites. */
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
# 85 : :
# 86 : 329572058 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
# 87 : : // First sort by most total work, ...
# 88 [ + + ]: 329572058 : if (pa->nChainWork > pb->nChainWork) return false;
# 89 [ + + ]: 283623222 : if (pa->nChainWork < pb->nChainWork) return true;
# 90 : :
# 91 : : // ... then by earliest time received, ...
# 92 [ + + ]: 1696698 : if (pa->nSequenceId < pb->nSequenceId) return false;
# 93 [ + + ]: 1660396 : if (pa->nSequenceId > pb->nSequenceId) return true;
# 94 : :
# 95 : : // Use pointer address as tie breaker (should only happen with blocks
# 96 : : // loaded from disk, as those all have id 0).
# 97 [ + + ]: 1639915 : if (pa < pb) return false;
# 98 [ + + ]: 1639902 : if (pa > pb) return true;
# 99 : :
# 100 : : // Identical blocks.
# 101 : 1639885 : return false;
# 102 : 1639885 : }
# 103 : :
//! The single global chainstate manager; accessed below via ChainstateActive()/ChainActive().
ChainstateManager g_chainman;
# 105 : :
# 106 : : CChainState& ChainstateActive()
# 107 : 9942264 : {
# 108 : 9942264 : LOCK(::cs_main);
# 109 : 9942264 : assert(g_chainman.m_active_chainstate);
# 110 : 9942264 : return *g_chainman.m_active_chainstate;
# 111 : 9942264 : }
# 112 : :
# 113 : : CChain& ChainActive()
# 114 : 7637860 : {
# 115 : 7637860 : LOCK(::cs_main);
# 116 : 7637860 : return ::ChainstateActive().m_chain;
# 117 : 7637860 : }
# 118 : :
# 119 : : /**
# 120 : : * Mutex to guard access to validation specific variables, such as reading
# 121 : : * or changing the chainstate.
# 122 : : *
# 123 : : * This may also need to be locked when updating the transaction pool, e.g. on
# 124 : : * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
# 125 : : *
# 126 : : * The transaction pool has a separate lock to allow reading from it and the
# 127 : : * chainstate at the same time.
# 128 : : */
RecursiveMutex cs_main;

// Most-work header this node knows of; may be ahead of the validated tip.
// NOTE(review): appears to be guarded by cs_main at use sites — confirm.
CBlockIndex *pindexBestHeader = nullptr;
// Mutex/condvar pair used together with g_best_block to signal a new best
// block hash to threads waiting on g_best_block_cv.
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
// Presumably enables running script checks on parallel worker threads —
// confirm at the point where the check queue is used.
bool g_parallel_script_checks{false};
// Policy: reject non-standard transactions (relaxed on test networks; see
// its use in MemPoolAccept::PreChecks below).
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;

// Block hash up to which signatures are assumed valid, and the minimum
// cumulative work expected on the valid chain — presumably set from
// -assumevalid / -minimumchainwork at startup; confirm in init code.
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;

// Minimum feerate for relay / mempool acceptance (see CheckFeeRate below).
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);

// Internal stuff
namespace {
    // Most-work block index observed so far that is invalid — NOTE(review):
    // inferred from the name; confirm at the sites that update it.
    CBlockIndex* pindexBestInvalid = nullptr;
} // namespace

// Internal stuff from blockstorage ...
extern RecursiveMutex cs_LastBlockFile;
extern std::vector<CBlockFileInfo> vinfoBlockFile;
extern int nLastBlockFile;
extern bool fCheckForPruning;
extern std::set<CBlockIndex*> setDirtyBlockIndex;
extern std::set<int> setDirtyFileInfo;
void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
// ... TODO move fully to blockstorage
# 160 : :
# 161 : : CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
# 162 : 727272 : {
# 163 : 727272 : AssertLockHeld(cs_main);
# 164 : 727272 : assert(std::addressof(g_chainman.BlockIndex()) == std::addressof(m_block_index));
# 165 : 727272 : BlockMap::const_iterator it = m_block_index.find(hash);
# 166 [ + + ]: 727272 : return it == m_block_index.end() ? nullptr : it->second;
# 167 : 727272 : }
# 168 : :
# 169 : : CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
# 170 : 1368 : {
# 171 : 1368 : AssertLockHeld(cs_main);
# 172 : :
# 173 : 1368 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
# 174 : : // Find the latest block common to locator and chain - we expect that
# 175 : : // locator.vHave is sorted descending by height.
# 176 [ + + ]: 1751 : for (const uint256& hash : locator.vHave) {
# 177 : 1751 : CBlockIndex* pindex = LookupBlockIndex(hash);
# 178 [ + + ]: 1751 : if (pindex) {
# 179 [ + + ]: 1385 : if (chain.Contains(pindex))
# 180 : 1349 : return pindex;
# 181 [ + + ]: 36 : if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
# 182 : 12 : return chain.Tip();
# 183 : 12 : }
# 184 : 36 : }
# 185 : 1751 : }
# 186 : 1368 : return chain.Genesis();
# 187 : 1368 : }
# 188 : :
# 189 : : std::unique_ptr<CBlockTreeDB> pblocktree;
# 190 : :
# 191 : : bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
# 192 : : const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
# 193 : : bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
# 194 : : std::vector<CScriptCheck>* pvChecks = nullptr)
# 195 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 196 : :
# 197 : : bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
# 198 : 2456339 : {
# 199 : 2456339 : AssertLockHeld(cs_main);
# 200 : 2456339 : assert(active_chain_tip); // TODO: Make active_chain_tip a reference
# 201 : 2456339 : assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip));
# 202 : :
# 203 : : // By convention a negative value for flags indicates that the
# 204 : : // current network-enforced consensus rules should be used. In
# 205 : : // a future soft-fork scenario that would mean checking which
# 206 : : // rules would be enforced for the next block and setting the
# 207 : : // appropriate flags. At the present time no soft-forks are
# 208 : : // scheduled, so no flags are set.
# 209 : 2456339 : flags = std::max(flags, 0);
# 210 : :
# 211 : : // CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate
# 212 : : // nLockTime because when IsFinalTx() is called within
# 213 : : // CBlock::AcceptBlock(), the height of the block *being*
# 214 : : // evaluated is what is used. Thus if we want to know if a
# 215 : : // transaction can be part of the *next* block, we need to call
# 216 : : // IsFinalTx() with one more than active_chain_tip.Height().
# 217 : 2456339 : const int nBlockHeight = active_chain_tip->nHeight + 1;
# 218 : :
# 219 : : // BIP113 requires that time-locked transactions have nLockTime set to
# 220 : : // less than the median time of the previous block they're contained in.
# 221 : : // When the next block is created its previous block will be the current
# 222 : : // chain tip, so we use that to calculate the median time passed to
# 223 : : // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
# 224 [ + + ]: 2456339 : const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
# 225 : 2456339 : ? active_chain_tip->GetMedianTimePast()
# 226 : 2456339 : : GetAdjustedTime();
# 227 : :
# 228 : 2456339 : return IsFinalTx(tx, nBlockHeight, nBlockTime);
# 229 : 2456339 : }
# 230 : :
# 231 : : bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp)
# 232 : 702 : {
# 233 : 702 : AssertLockHeld(cs_main);
# 234 : 702 : assert(lp);
# 235 : : // If there are relative lock times then the maxInputBlock will be set
# 236 : : // If there are no relative lock times, the LockPoints don't depend on the chain
# 237 [ + - ]: 702 : if (lp->maxInputBlock) {
# 238 : : // Check whether ::ChainActive() is an extension of the block at which the LockPoints
# 239 : : // calculation was valid. If not LockPoints are no longer valid
# 240 : 702 : assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
# 241 [ + + ]: 702 : if (!active_chain.Contains(lp->maxInputBlock)) {
# 242 : 5 : return false;
# 243 : 5 : }
# 244 : 697 : }
# 245 : :
# 246 : : // LockPoints still valid
# 247 : 697 : return true;
# 248 : 697 : }
# 249 : :
/**
 * Check whether a transaction's relative lock-times (sequence locks) would be
 * satisfied in the block built on top of `tip` (i.e. at height tip+1).
 *
 * @param tip                   Chain tip to evaluate against; must be non-null.
 * @param coins_view            Source for input coins (UTXO set and/or mempool view).
 * @param tx                    The transaction to check.
 * @param flags                 Lock-time verification flags, forwarded to
 *                              CalculateSequenceLocks().
 * @param lp                    Optional in/out LockPoints cache; see below.
 * @param useExistingLockPoints When true, reuse the values already stored in
 *                              `lp` (which must then be non-null) instead of
 *                              recomputing them from the inputs.
 * @return true if the sequence locks evaluate as satisfied; false on missing
 *         inputs (logged via error()) or unsatisfied locks.
 */
bool CheckSequenceLocks(CBlockIndex* tip,
                        const CCoinsView& coins_view,
                        const CTransaction& tx,
                        int flags,
                        LockPoints* lp,
                        bool useExistingLockPoints)
{
    assert(tip != nullptr);

    // Dummy index representing the hypothetical next block.
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than active_chainstate.m_chain.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // Collect the confirmation height of every input.
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!coins_view.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (const int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
# 320 : :
# 321 : : // Returns the script flags which should be checked for a given block
# 322 : : static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
# 323 : :
# 324 : : static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
# 325 : : EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main)
# 326 : 21872 : {
# 327 : 21872 : int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
# 328 [ + + ]: 21872 : if (expired != 0) {
# 329 [ + - ]: 2 : LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
# 330 : 2 : }
# 331 : :
# 332 : 21872 : std::vector<COutPoint> vNoSpendsRemaining;
# 333 : 21872 : pool.TrimToSize(limit, &vNoSpendsRemaining);
# 334 : 21872 : assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache));
# 335 [ + + ]: 21872 : for (const COutPoint& removed : vNoSpendsRemaining)
# 336 : 26 : coins_cache.Uncache(removed);
# 337 : 21872 : }
# 338 : :
# 339 : : static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
# 340 : 19660 : {
# 341 : 19660 : AssertLockHeld(cs_main);
# 342 : 19660 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 343 [ + + ]: 19660 : if (active_chainstate.IsInitialBlockDownload())
# 344 : 68 : return false;
# 345 [ + + ]: 19592 : if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
# 346 : 108 : return false;
# 347 [ - + ]: 19484 : if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1)
# 348 : 0 : return false;
# 349 : 19484 : return true;
# 350 : 19484 : }
# 351 : :
/* Make mempool consistent after a reorg, by re-adding or recursively erasing
 * disconnected block transactions from the mempool, and also removing any
 * other transactions from the mempool that are no longer valid given the new
 * tip/height.
 *
 * Note: we assume that disconnectpool only contains transactions that are NOT
 * confirmed in the current chain nor already in the mempool (otherwise,
 * in-mempool descendants of such transactions would be removed).
 *
 * Passing fAddToMempool=false will skip trying to add the transactions back,
 * and instead just erase from the mempool as needed.
 *
 * Caller must hold both cs_main and mempool.cs.
 */

static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);
    assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
    // Hashes of disconnected transactions that made it back into the mempool;
    // their descendant state is fixed up below.
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (mempool.exists((*it)->GetHash())) {
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);

    // We also need to remove any now-immature transactions
    mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
}
# 403 : :
/**
 * Checks to avoid mempool polluting consensus critical paths since cached
 * signature and script validity results will be reused if we validate this
 * transaction again during block validation.
 *
 * Re-verifies (via asserts) that every input coin seen through the mempool
 * view matches either a mempool transaction's output or the chainstate UTXO
 * set, then runs CheckInputScripts() with caching enabled so the results can
 * be reused at ConnectBlock time. Caller must hold cs_main and pool.cs.
 * */
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state,
                                           const CCoinsViewCache& view, const CTxMemPool& pool,
                                           unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(pool.cs);

    assert(!tx.IsCoinBase());
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = view.AccessCoin(txin.prevout);

        // This coin was checked in PreChecks and MemPoolAccept
        // has been holding cs_main since then.
        Assume(!coin.IsSpent());
        if (coin.IsSpent()) return false;

        // If the Coin is available, there are 2 possibilities:
        // it is available in our current ChainstateActive UTXO set,
        // or it's a UTXO provided by a transaction in our mempool.
        // Ensure the scriptPubKeys in Coins from CoinsView are correct.
        const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
        if (txFrom) {
            assert(txFrom->GetHash() == txin.prevout.hash);
            assert(txFrom->vout.size() > txin.prevout.n);
            assert(txFrom->vout[txin.prevout.n] == coin.out);
        } else {
            assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip));
            const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
            assert(!coinFromUTXOSet.IsSpent());
            assert(coinFromUTXOSet.out == coin.out);
        }
    }

    // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
    return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullScriptStore = */ true, txdata);
}
# 446 : :
# 447 : : namespace {
# 448 : :
/**
 * Encapsulates the mempool-acceptance pipeline for one or more transactions:
 * PreChecks -> PolicyScriptChecks -> ConsensusScriptChecks -> Finalize
 * (see the private member declarations below). Package/ancestor limits are
 * snapshotted from gArgs at construction time.
 */
class MemPoolAccept
{
public:
    explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
        m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
        m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
        assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
    }

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        // Time at which the transaction is considered received.
        const int64_t m_accept_time;
        // When true, skip mempool size/fee limits (used when re-adding
        // transactions during a reorg; see UpdateMempoolForReorg above).
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        // When true, run all checks but do not actually add to the mempool.
        const bool m_test_accept;
        /** Disable BIP125 RBFing; disallow all conflicts with mempool transactions. */
        const bool disallow_mempool_conflicts;
    };

    // Single transaction acceptance
    MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /**
     * Multiple transaction acceptance. Transactions may or may not be interdependent,
     * but must not conflict with each other. Parents must come before children if any
     * dependencies exist, otherwise a TX_MISSING_INPUTS error will be returned.
     */
    PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        // Txids of mempool transactions this transaction conflicts with
        // (candidates for BIP125 replacement).
        std::set<uint256> m_conflicts;
        // Mempool entries that would be removed if this tx is accepted.
        CTxMemPool::setEntries m_all_conflicting;
        CTxMemPool::setEntries m_ancestors;
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        std::list<CTransactionRef> m_replaced_transactions;

        // Whether this acceptance replaces existing mempool transactions.
        bool m_replacement_transaction;
        CAmount m_base_fees;
        // Fee used for policy decisions — presumably m_base_fees adjusted by
        // prioritisation deltas; confirm in PreChecks.
        CAmount m_modified_fees;
        // Aggregate fees/size of the transactions that would be evicted.
        CAmount m_conflicting_fees;
        size_t m_conflicting_size;

        const CTransactionRef& m_ptx;
        const uint256& m_hash;
        TxValidationState m_state;
    };

    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Compare a package's feerate against minimum allowed.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs)
    {
        // Dynamic floor: GetMinFee rises as the pool approaches -maxmempool.
        CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }

        // Static floor: the configured minimum relay feerate.
        if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
        }
        return true;
    }

private:
    CTxMemPool& m_pool;
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    // Backend for m_view before real input lookup begins (see PreChecks,
    // which switches m_view to m_viewmempool via SetBackend).
    CCoinsView m_dummy;

    CChainState& m_active_chainstate;

    // The package limits in effect at the time of invocation.
    const size_t m_limit_ancestors;
    const size_t m_limit_ancestor_size;
    // These may be modified while evaluating a transaction (eg to account for
    // in-mempool conflicts; see below).
    size_t m_limit_descendants;
    size_t m_limit_descendant_size;
};
# 562 : :
# 563 : : bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
# 564 : 32227 : {
# 565 : 32227 : const CTransactionRef& ptx = ws.m_ptx;
# 566 : 32227 : const CTransaction& tx = *ws.m_ptx;
# 567 : 32227 : const uint256& hash = ws.m_hash;
# 568 : :
# 569 : : // Copy/alias what we need out of args
# 570 : 32227 : const int64_t nAcceptTime = args.m_accept_time;
# 571 : 32227 : const bool bypass_limits = args.m_bypass_limits;
# 572 : 32227 : std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
# 573 : :
# 574 : : // Alias what we need out of ws
# 575 : 32227 : TxValidationState& state = ws.m_state;
# 576 : 32227 : std::set<uint256>& setConflicts = ws.m_conflicts;
# 577 : 32227 : CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
# 578 : 32227 : CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
# 579 : 32227 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
# 580 : 32227 : bool& fReplacementTransaction = ws.m_replacement_transaction;
# 581 : 32227 : CAmount& nModifiedFees = ws.m_modified_fees;
# 582 : 32227 : CAmount& nConflictingFees = ws.m_conflicting_fees;
# 583 : 32227 : size_t& nConflictingSize = ws.m_conflicting_size;
# 584 : :
# 585 [ + + ]: 32227 : if (!CheckTransaction(tx, state)) {
# 586 : 12 : return false; // state filled in by CheckTransaction
# 587 : 12 : }
# 588 : :
# 589 : : // Coinbase is only valid in a block, not as a loose transaction
# 590 [ + + ]: 32215 : if (tx.IsCoinBase())
# 591 : 3 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
# 592 : :
# 593 : : // Rather not work on nonstandard transactions (unless -testnet/-regtest)
# 594 : 32212 : std::string reason;
# 595 [ + + ][ + + ]: 32212 : if (fRequireStandard && !IsStandardTx(tx, reason))
# 596 : 1486 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
# 597 : :
# 598 : : // Do not work on transactions that are too small.
# 599 : : // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
# 600 : : // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
# 601 : : // 64-byte transactions.
# 602 [ + + ]: 30726 : if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
# 603 : 6 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
# 604 : :
# 605 : : // Only accept nLockTime-using transactions that can be mined in the next
# 606 : : // block; we don't want our mempool filled up with transactions that can't
# 607 : : // be mined yet.
# 608 : 30720 : assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
# 609 [ + + ]: 30720 : if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
# 610 : 85 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
# 611 : :
# 612 : : // is it already in the memory pool?
# 613 [ + + ]: 30635 : if (m_pool.exists(hash)) {
# 614 : 7 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
# 615 : 7 : }
# 616 : :
# 617 : : // Check for conflicts with in-memory transactions
# 618 [ + + ]: 30628 : for (const CTxIn &txin : tx.vin)
# 619 : 67668 : {
# 620 : 67668 : const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
# 621 [ + + ]: 67668 : if (ptxConflicting) {
# 622 [ + + ]: 550 : if (!setConflicts.count(ptxConflicting->GetHash()))
# 623 : 542 : {
# 624 : : // Allow opt-out of transaction replacement by setting
# 625 : : // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
# 626 : : //
# 627 : : // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
# 628 : : // non-replaceable transactions. All inputs rather than just one
# 629 : : // is for the sake of multi-party protocols, where we don't
# 630 : : // want a single party to be able to disable replacement.
# 631 : : //
# 632 : : // The opt-out ignores descendants as anyone relying on
# 633 : : // first-seen mempool behavior should be checking all
# 634 : : // unconfirmed ancestors anyway; doing otherwise is hopelessly
# 635 : : // insecure.
# 636 : 542 : bool fReplacementOptOut = true;
# 637 [ + + ]: 542 : for (const CTxIn &_txin : ptxConflicting->vin)
# 638 : 543 : {
# 639 [ + + ]: 543 : if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
# 640 : 538 : {
# 641 : 538 : fReplacementOptOut = false;
# 642 : 538 : break;
# 643 : 538 : }
# 644 : 543 : }
# 645 [ + + ][ + + ]: 542 : if (fReplacementOptOut || args.disallow_mempool_conflicts) {
# 646 : 5 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
# 647 : 5 : }
# 648 : :
# 649 : 537 : setConflicts.insert(ptxConflicting->GetHash());
# 650 : 537 : }
# 651 : 550 : }
# 652 : 67668 : }
# 653 : :
# 654 : 30628 : LockPoints lp;
# 655 : 30623 : m_view.SetBackend(m_viewmempool);
# 656 : :
# 657 : 30623 : assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
# 658 : 30623 : const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
# 659 : : // do all inputs exist?
# 660 [ + + ]: 64321 : for (const CTxIn& txin : tx.vin) {
# 661 [ + + ]: 64321 : if (!coins_cache.HaveCoinInCache(txin.prevout)) {
# 662 : 13193 : coins_to_uncache.push_back(txin.prevout);
# 663 : 13193 : }
# 664 : :
# 665 : : // Note: this call may add txin.prevout to the coins cache
# 666 : : // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
# 667 : : // later (via coins_to_uncache) if this tx turns out to be invalid.
# 668 [ + + ]: 64321 : if (!m_view.HaveCoin(txin.prevout)) {
# 669 : : // Are inputs missing because we already have the tx?
# 670 [ + + ]: 7186 : for (size_t out = 0; out < tx.vout.size(); out++) {
# 671 : : // Optimistically just do efficient check of cache for outputs
# 672 [ + + ]: 3604 : if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
# 673 : 2 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
# 674 : 2 : }
# 675 : 3604 : }
# 676 : : // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
# 677 : 3584 : return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
# 678 : 3584 : }
# 679 : 64321 : }
# 680 : :
# 681 : : // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
# 682 : : // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
# 683 : 30623 : m_view.GetBestBlock();
# 684 : :
# 685 : : // we have all inputs cached now, so switch back to dummy (to protect
# 686 : : // against bugs where we pull more inputs from disk that miss being added
# 687 : : // to coins_to_uncache)
# 688 : 27039 : m_view.SetBackend(m_dummy);
# 689 : :
# 690 : : // Only accept BIP68 sequence locked transactions that can be mined in the next
# 691 : : // block; we don't want our mempool filled up with transactions that can't
# 692 : : // be mined yet.
# 693 : : // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
# 694 : : // backend was removed, it no longer pulls coins from the mempool.
# 695 : 27039 : assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
# 696 [ + + ]: 27039 : if (!CheckSequenceLocks(m_active_chainstate.m_chain.Tip(), m_view, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
# 697 : 361 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
# 698 : :
# 699 : 26678 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman));
# 700 [ + + ]: 26678 : if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) {
# 701 : 4 : return false; // state filled in by CheckTxInputs
# 702 : 4 : }
# 703 : :
# 704 : : // Check for non-standard pay-to-script-hash in inputs
# 705 : 26674 : const auto& params = args.m_chainparams.GetConsensus();
# 706 : 26674 : assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
# 707 : 26674 : auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache);
# 708 [ + + ][ + + ]: 26674 : if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) {
# 709 : 213 : return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
# 710 : 213 : }
# 711 : :
# 712 : : // Check for non-standard witnesses.
# 713 [ + + ][ + + ]: 26461 : if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
# [ + + ]
# 714 : 152 : return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
# 715 : :
# 716 : 26309 : int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
# 717 : :
# 718 : : // nModifiedFees includes any fee deltas from PrioritiseTransaction
# 719 : 26309 : nModifiedFees = ws.m_base_fees;
# 720 : 26309 : m_pool.ApplyDelta(hash, nModifiedFees);
# 721 : :
# 722 : : // Keep track of transactions that spend a coinbase, which we re-scan
# 723 : : // during reorgs to ensure COINBASE_MATURITY is still met.
# 724 : 26309 : bool fSpendsCoinbase = false;
# 725 [ + + ]: 54204 : for (const CTxIn &txin : tx.vin) {
# 726 : 54204 : const Coin &coin = m_view.AccessCoin(txin.prevout);
# 727 [ + + ]: 54204 : if (coin.IsCoinBase()) {
# 728 : 2879 : fSpendsCoinbase = true;
# 729 : 2879 : break;
# 730 : 2879 : }
# 731 : 54204 : }
# 732 : :
# 733 : 26309 : assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
# 734 : 26309 : entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
# 735 : 26309 : fSpendsCoinbase, nSigOpsCost, lp));
# 736 : 26309 : unsigned int nSize = entry->GetTxSize();
# 737 : :
# 738 [ + + ]: 26309 : if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
# 739 : 4 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
# 740 : 4 : strprintf("%d", nSigOpsCost));
# 741 : :
# 742 : : // No transactions are allowed below minRelayTxFee except from disconnected
# 743 : : // blocks
# 744 [ + + ][ + + ]: 26305 : if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
# 745 : :
# 746 : 26285 : const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
# 747 : : // Calculate in-mempool ancestors, up to a limit.
# 748 [ + + ]: 26285 : if (setConflicts.size() == 1) {
# 749 : : // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
# 750 : : // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
# 751 : : // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
# 752 : : // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
# 753 : : // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
# 754 : : // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
# 755 : : // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
# 756 : : // for off-chain contract systems (see link in the comment below).
# 757 : : //
# 758 : : // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
# 759 : : // conflict directly with exactly one other transaction (but may evict children of said transaction),
# 760 : : // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
# 761 : : // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
# 762 : : // amended, we may need to move that check to here instead of removing it wholesale.
# 763 : : //
# 764 : : // Such transactions are clearly not merging any existing packages, so we are only concerned with
# 765 : : // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
# 766 : : // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
# 767 : : // to.
# 768 : : //
# 769 : : // To check these we first check if we meet the RBF criteria, above, and increment the descendant
# 770 : : // limits by the direct conflict and its descendants (as these are recalculated in
# 771 : : // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
# 772 : : // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
# 773 : : // the ancestor limits should be the same for both our new transaction and any conflicts).
# 774 : : // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
# 775 : : // into force here (as we're only adding a single transaction).
# 776 : 336 : assert(setIterConflicting.size() == 1);
# 777 : 336 : CTxMemPool::txiter conflict = *setIterConflicting.begin();
# 778 : :
# 779 : 336 : m_limit_descendants += 1;
# 780 : 336 : m_limit_descendant_size += conflict->GetSizeWithDescendants();
# 781 : 336 : }
# 782 : :
# 783 : 26285 : std::string errString;
# 784 [ + + ]: 26285 : if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
# 785 : 70 : setAncestors.clear();
# 786 : : // If CalculateMemPoolAncestors fails second time, we want the original error string.
# 787 : 70 : std::string dummy_err_string;
# 788 : : // Contracting/payment channels CPFP carve-out:
# 789 : : // If the new transaction is relatively small (up to 40k weight)
# 790 : : // and has at most one ancestor (ie ancestor limit of 2, including
# 791 : : // the new transaction), allow it if its parent has exactly the
# 792 : : // descendant limit descendants.
# 793 : : //
# 794 : : // This allows protocols which rely on distrusting counterparties
# 795 : : // being able to broadcast descendants of an unconfirmed transaction
# 796 : : // to be secure by simply only having two immediately-spendable
# 797 : : // outputs - one for each counterparty. For more info on the uses for
# 798 : : // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
# 799 [ + + ]: 70 : if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
# 800 [ + + ]: 70 : !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
# 801 : 64 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
# 802 : 64 : }
# 803 : 26221 : }
# 804 : :
# 805 : : // A transaction that spends outputs that would be replaced by it is invalid. Now
# 806 : : // that we have the set of all ancestors we can detect this
# 807 : : // pathological case by making sure setConflicts and setAncestors don't
# 808 : : // intersect.
# 809 [ + + ]: 26221 : for (CTxMemPool::txiter ancestorIt : setAncestors)
# 810 : 12152 : {
# 811 : 12152 : const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
# 812 [ + + ]: 12152 : if (setConflicts.count(hashAncestor))
# 813 : 2 : {
# 814 : 2 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
# 815 : 2 : strprintf("%s spends conflicting transaction %s",
# 816 : 2 : hash.ToString(),
# 817 : 2 : hashAncestor.ToString()));
# 818 : 2 : }
# 819 : 12152 : }
# 820 : :
# 821 : : // Check if it's economically rational to mine this transaction rather
# 822 : : // than the ones it replaces.
# 823 : 26221 : nConflictingFees = 0;
# 824 : 26219 : nConflictingSize = 0;
# 825 : 26219 : uint64_t nConflictingCount = 0;
# 826 : :
# 827 : : // If we don't hold the lock allConflicting might be incomplete; the
# 828 : : // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
# 829 : : // mempool consistency for us.
# 830 : 26219 : fReplacementTransaction = setConflicts.size();
# 831 [ + + ]: 26219 : if (fReplacementTransaction)
# 832 : 336 : {
# 833 : 336 : CFeeRate newFeeRate(nModifiedFees, nSize);
# 834 : 336 : std::set<uint256> setConflictsParents;
# 835 : 336 : const int maxDescendantsToVisit = 100;
# 836 [ + + ]: 535 : for (const auto& mi : setIterConflicting) {
# 837 : : // Don't allow the replacement to reduce the feerate of the
# 838 : : // mempool.
# 839 : : //
# 840 : : // We usually don't want to accept replacements with lower
# 841 : : // feerates than what they replaced as that would lower the
# 842 : : // feerate of the next block. Requiring that the feerate always
# 843 : : // be increased is also an easy-to-reason about way to prevent
# 844 : : // DoS attacks via replacements.
# 845 : : //
# 846 : : // We only consider the feerates of transactions being directly
# 847 : : // replaced, not their indirect descendants. While that does
# 848 : : // mean high feerate children are ignored when deciding whether
# 849 : : // or not to replace, we do require the replacement to pay more
# 850 : : // overall fees too, mitigating most cases.
# 851 : 535 : CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
# 852 [ + + ]: 535 : if (newFeeRate <= oldFeeRate)
# 853 : 8 : {
# 854 : 8 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
# 855 : 8 : strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
# 856 : 8 : hash.ToString(),
# 857 : 8 : newFeeRate.ToString(),
# 858 : 8 : oldFeeRate.ToString()));
# 859 : 8 : }
# 860 : :
# 861 [ + + ]: 527 : for (const CTxIn &txin : mi->GetTx().vin)
# 862 : 536 : {
# 863 : 536 : setConflictsParents.insert(txin.prevout.hash);
# 864 : 536 : }
# 865 : :
# 866 : 527 : nConflictingCount += mi->GetCountWithDescendants();
# 867 : 527 : }
# 868 : : // This potentially overestimates the number of actual descendants
# 869 : : // but we just want to be conservative to avoid doing too much
# 870 : : // work.
# 871 [ + + ]: 336 : if (nConflictingCount <= maxDescendantsToVisit) {
# 872 : : // If not too many to replace, then calculate the set of
# 873 : : // transactions that would have to be evicted
# 874 [ + + ]: 424 : for (CTxMemPool::txiter it : setIterConflicting) {
# 875 : 424 : m_pool.CalculateDescendants(it, allConflicting);
# 876 : 424 : }
# 877 [ + + ]: 706 : for (CTxMemPool::txiter it : allConflicting) {
# 878 : 706 : nConflictingFees += it->GetModifiedFee();
# 879 : 706 : nConflictingSize += it->GetTxSize();
# 880 : 706 : }
# 881 : 325 : } else {
# 882 : 3 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
# 883 : 3 : strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
# 884 : 3 : hash.ToString(),
# 885 : 3 : nConflictingCount,
# 886 : 3 : maxDescendantsToVisit));
# 887 : 3 : }
# 888 : :
# 889 [ + + ]: 765 : for (unsigned int j = 0; j < tx.vin.size(); j++)
# 890 : 441 : {
# 891 : : // We don't want to accept replacements that require low
# 892 : : // feerate junk to be mined first. Ideally we'd keep track of
# 893 : : // the ancestor feerates and make the decision based on that,
# 894 : : // but for now requiring all new inputs to be confirmed works.
# 895 : : //
# 896 : : // Note that if you relax this to make RBF a little more useful,
# 897 : : // this may break the CalculateMempoolAncestors RBF relaxation,
# 898 : : // above. See the comment above the first CalculateMempoolAncestors
# 899 : : // call for more info.
# 900 [ + + ]: 441 : if (!setConflictsParents.count(tx.vin[j].prevout.hash))
# 901 : 5 : {
# 902 : : // Rather than check the UTXO set - potentially expensive -
# 903 : : // it's cheaper to just check if the new input refers to a
# 904 : : // tx that's in the mempool.
# 905 [ + + ]: 5 : if (m_pool.exists(tx.vin[j].prevout.hash)) {
# 906 : 1 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
# 907 : 1 : strprintf("replacement %s adds unconfirmed input, idx %d",
# 908 : 1 : hash.ToString(), j));
# 909 : 1 : }
# 910 : 5 : }
# 911 : 441 : }
# 912 : :
# 913 : : // The replacement must pay greater fees than the transactions it
# 914 : : // replaces - if we did the bandwidth used by those conflicting
# 915 : : // transactions would not be paid for.
# 916 [ + + ]: 325 : if (nModifiedFees < nConflictingFees)
# 917 : 2 : {
# 918 : 2 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
# 919 : 2 : strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
# 920 : 2 : hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
# 921 : 2 : }
# 922 : :
# 923 : : // Finally in addition to paying more fees than the conflicts the
# 924 : : // new transaction must pay for its own bandwidth.
# 925 : 322 : CAmount nDeltaFees = nModifiedFees - nConflictingFees;
# 926 [ - + ]: 322 : if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
# 927 : 0 : {
# 928 : 0 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
# 929 : 0 : strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
# 930 : 0 : hash.ToString(),
# 931 : 0 : FormatMoney(nDeltaFees),
# 932 : 0 : FormatMoney(::incrementalRelayFee.GetFee(nSize))));
# 933 : 0 : }
# 934 : 26205 : }
# 935 : 26205 : return true;
# 936 : 26205 : }
# 937 : :
/**
 * Run the relay-policy (STANDARD) script checks on all of the transaction's
 * inputs. On failure, additionally probes whether the failure is caused purely
 * by missing/stripped witness data so callers can report TX_WITNESS_STRIPPED.
 *
 * @param[in]     args    Validation arguments (unused here, kept for a uniform stage signature).
 * @param[in,out] ws      Workspace carrying the tx and the TxValidationState to fill on failure.
 * @param[in,out] txdata  Precomputed sighash data, populated/reused by CheckInputScripts.
 * @return true if all inputs pass STANDARD script verification; false otherwise
 *         (ws.m_state filled in by CheckInputScripts, possibly re-tagged as witness-stripped).
 */
bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;
    TxValidationState& state = ws.m_state;

    constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;

    // Check input scripts and signatures.
    // This is done last to help prevent CPU exhaustion denial-of-service attacks.
    if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
        // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
        // need to turn both off, and compare against just turning off CLEANSTACK
        // to see if the failure is specifically due to witness validation.
        // (Only attempted for transactions that carry no witness at all: if a
        // witness is present and invalid, the original failure stands as-is.)
        TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
        if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
                !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
            // Only the witness is missing, so the transaction itself may be fine.
            state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
                    state.GetRejectReason(), state.GetDebugMessage());
        }
        return false; // state filled in by CheckInputScripts
    }

    return true;
}
# 963 : :
/**
 * Re-run the input script checks against the *current tip's* consensus script
 * flags (as opposed to the STANDARD flags used by PolicyScriptChecks) so that
 * the script-execution cache is warmed with the flags that will actually be
 * used at block validation time.
 *
 * @param[in]     args    Validation arguments; supplies chain parameters.
 * @param[in,out] ws      Workspace carrying the tx, hash and validation state.
 * @param[in,out] txdata  Precomputed sighash data shared with the policy checks.
 * @return true on success; false only on the "should be impossible" case where
 *         a tx passed STANDARD flags but fails consensus flags (logged as a bug).
 */
bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    TxValidationState& state = ws.m_state;
    const CChainParams& chainparams = args.m_chainparams;

    // Check again against the current block tip's script verification
    // flags to cache our script execution flags. This is, of course,
    // useless if the next block has different script flags from the
    // previous one, but because the cache tracks script flags for us it
    // will auto-invalidate and we'll just have a few blocks of extra
    // misses on soft-fork activation.
    //
    // This is also useful in case of bugs in the standard flags that cause
    // transactions to pass as valid when they're actually invalid. For
    // instance the STRICTENC flag was incorrectly allowing certain
    // CHECKSIG NOT scripts to pass, even though they were invalid.
    //
    // There is a similar check in CreateNewBlock() to prevent creating
    // invalid blocks (using TestBlockValidity), however allowing such
    // transactions into the mempool can be exploited as a DoS attack.
    // (The asserts below guard an in-progress refactor: the global chainstate
    // accessors must still agree with the member m_active_chainstate.)
    assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
    unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus());
    assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
    if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) {
        return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
                __func__, hash.ToString(), state.ToString());
    }

    return true;
}
# 996 : :
/**
 * Commit the validated transaction to the mempool: evict the transactions it
 * replaces (BIP 125), add the new entry, then enforce mempool size limits.
 *
 * Must be called with cs_main and m_pool.cs held (the callers in this file
 * hold both). Statement order matters: conflicts are removed before the new
 * entry is added, and the trim happens last so the new entry itself can be
 * evicted if it doesn't make the cut.
 *
 * @param[in]     args  Validation arguments; supplies m_bypass_limits.
 * @param[in,out] ws    Workspace with the entry, ancestors and conflict sets
 *                      computed by the earlier stages.
 * @return true if the tx is in the mempool on return; false (state = "mempool
 *         full") if it was trimmed right back out by LimitMempoolSize.
 */
bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    TxValidationState& state = ws.m_state;
    const bool bypass_limits = args.m_bypass_limits;

    // Local aliases for the results the PreChecks stage stored in the workspace.
    CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
    CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
    const CAmount& nModifiedFees = ws.m_modified_fees;
    const CAmount& nConflictingFees = ws.m_conflicting_fees;
    const size_t& nConflictingSize = ws.m_conflicting_size;
    const bool fReplacementTransaction = ws.m_replacement_transaction;
    std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;

    // Remove conflicting transactions from the mempool
    for (CTxMemPool::txiter it : allConflicting)
    {
        LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
                it->GetTx().GetHash().ToString(),
                hash.ToString(),
                FormatMoney(nModifiedFees - nConflictingFees),
                (int)entry->GetTxSize() - (int)nConflictingSize);
        ws.m_replaced_transactions.push_back(it->GetSharedTx());
    }
    m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);

    // This transaction should only count for fee estimation if:
    // - it isn't a BIP 125 replacement transaction (may not be widely supported)
    // - it's not being re-added during a reorg which bypasses typical mempool fee limits
    // - the node is not behind
    // - the transaction is not dependent on any other transactions in the mempool
    assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
    bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);

    // Store transaction in memory
    m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);

    // trim mempool and check if tx was trimmed
    if (!bypass_limits) {
        assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
        LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
        // The trim may have evicted the very transaction we just added.
        if (!m_pool.exists(hash))
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
    }
    return true;
}
# 1044 : :
# 1045 : : MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
# 1046 : 31782 : {
# 1047 : 31782 : AssertLockHeld(cs_main);
# 1048 : 31782 : LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
# 1049 : :
# 1050 : 31782 : Workspace ws(ptx);
# 1051 : :
# 1052 [ + + ]: 31782 : if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1053 : :
# 1054 : : // Only compute the precomputed transaction data if we need to verify
# 1055 : : // scripts (ie, other policy checks pass). We perform the inexpensive
# 1056 : : // checks first and avoid hashing and signature verification unless those
# 1057 : : // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
# 1058 : 25764 : PrecomputedTransactionData txdata;
# 1059 : :
# 1060 [ + + ]: 25764 : if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult::Failure(ws.m_state);
# 1061 : :
# 1062 [ - + ]: 23511 : if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult::Failure(ws.m_state);
# 1063 : :
# 1064 : : // Tx was accepted, but not added
# 1065 [ + + ]: 23511 : if (args.m_test_accept) {
# 1066 : 3380 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees);
# 1067 : 3380 : }
# 1068 : :
# 1069 [ - + ]: 20131 : if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1070 : :
# 1071 : 20131 : GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
# 1072 : :
# 1073 : 20131 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees);
# 1074 : 20131 : }
# 1075 : :
# 1076 : : PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
# 1077 : 50 : {
# 1078 : 50 : AssertLockHeld(cs_main);
# 1079 : :
# 1080 : 50 : PackageValidationState package_state;
# 1081 : 50 : const unsigned int package_count = txns.size();
# 1082 : :
# 1083 : : // These context-free package limits can be checked before taking the mempool lock.
# 1084 [ + + ]: 50 : if (package_count > MAX_PACKAGE_COUNT) {
# 1085 : 2 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions");
# 1086 : 2 : return PackageMempoolAcceptResult(package_state, {});
# 1087 : 2 : }
# 1088 : :
# 1089 : 48 : const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0,
# 1090 : 485 : [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); });
# 1091 : : // If the package only contains 1 tx, it's better to report the policy violation on individual tx size.
# 1092 [ + + ][ + + ]: 48 : if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) {
# 1093 : 2 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large");
# 1094 : 2 : return PackageMempoolAcceptResult(package_state, {});
# 1095 : 2 : }
# 1096 : :
# 1097 : : // Construct workspaces and check package policies.
# 1098 : 46 : std::vector<Workspace> workspaces{};
# 1099 : 46 : workspaces.reserve(package_count);
# 1100 : 46 : {
# 1101 : 46 : std::unordered_set<uint256, SaltedTxidHasher> later_txids;
# 1102 : 46 : std::transform(txns.cbegin(), txns.cend(), std::inserter(later_txids, later_txids.end()),
# 1103 : 477 : [](const auto& tx) { return tx->GetHash(); });
# 1104 : : // Require the package to be sorted in order of dependency, i.e. parents appear before children.
# 1105 : : // An unsorted package will fail anyway on missing-inputs, but it's better to quit earlier and
# 1106 : : // fail on something less ambiguous (missing-inputs could also be an orphan or trying to
# 1107 : : // spend nonexistent coins).
# 1108 [ + + ]: 453 : for (const auto& tx : txns) {
# 1109 [ + + ]: 2779 : for (const auto& input : tx->vin) {
# 1110 [ + + ]: 2779 : if (later_txids.find(input.prevout.hash) != later_txids.end()) {
# 1111 : : // The parent is a subsequent transaction in the package.
# 1112 : 1 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-sorted");
# 1113 : 1 : return PackageMempoolAcceptResult(package_state, {});
# 1114 : 1 : }
# 1115 : 2779 : }
# 1116 : 453 : later_txids.erase(tx->GetHash());
# 1117 : 452 : workspaces.emplace_back(Workspace(tx));
# 1118 : 452 : }
# 1119 : 46 : }
# 1120 : 46 : std::map<const uint256, const MempoolAcceptResult> results;
# 1121 : 45 : {
# 1122 : : // Don't allow any conflicting transactions, i.e. spending the same inputs, in a package.
# 1123 : 45 : std::unordered_set<COutPoint, SaltedOutpointHasher> inputs_seen;
# 1124 [ + + ]: 452 : for (const auto& tx : txns) {
# 1125 [ + + ]: 2778 : for (const auto& input : tx->vin) {
# 1126 [ + + ]: 2778 : if (inputs_seen.find(input.prevout) != inputs_seen.end()) {
# 1127 : : // This input is also present in another tx in the package.
# 1128 : 3 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "conflict-in-package");
# 1129 : 3 : return PackageMempoolAcceptResult(package_state, {});
# 1130 : 3 : }
# 1131 : 2778 : }
# 1132 : : // Batch-add all the inputs for a tx at a time. If we added them 1 at a time, we could
# 1133 : : // catch duplicate inputs within a single tx. This is a more severe, consensus error,
# 1134 : : // and we want to report that from CheckTransaction instead.
# 1135 : 452 : std::transform(tx->vin.cbegin(), tx->vin.cend(), std::inserter(inputs_seen, inputs_seen.end()),
# 1136 : 2775 : [](const auto& input) { return input.prevout; });
# 1137 : 449 : }
# 1138 : 45 : }
# 1139 : :
# 1140 : 45 : LOCK(m_pool.cs);
# 1141 : :
# 1142 : : // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
# 1143 [ + + ]: 445 : for (Workspace& ws : workspaces) {
# 1144 [ + + ]: 445 : if (!PreChecks(args, ws)) {
# 1145 : 4 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1146 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1147 : 4 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1148 : 4 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1149 : 4 : }
# 1150 : : // Make the coins created by this transaction available for subsequent transactions in the
# 1151 : : // package to spend. Since we already checked conflicts in the package and RBFs are
# 1152 : : // impossible, we don't need to track the coins spent. Note that this logic will need to be
# 1153 : : // updated if RBFs in packages are allowed in the future.
# 1154 : 441 : assert(args.disallow_mempool_conflicts);
# 1155 : 441 : m_viewmempool.PackageAddTransaction(ws.m_ptx);
# 1156 : 441 : }
# 1157 : :
# 1158 [ + + ]: 436 : for (Workspace& ws : workspaces) {
# 1159 : 436 : PrecomputedTransactionData txdata;
# 1160 [ + + ]: 436 : if (!PolicyScriptChecks(args, ws, txdata)) {
# 1161 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1162 : 1 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1163 : 1 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1164 : 1 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1165 : 1 : }
# 1166 [ + - ]: 435 : if (args.m_test_accept) {
# 1167 : : // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
# 1168 : : // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
# 1169 : 435 : results.emplace(ws.m_ptx->GetWitnessHash(),
# 1170 : 435 : MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees));
# 1171 : 435 : }
# 1172 : 435 : }
# 1173 : :
# 1174 : 38 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1175 : 38 : }
# 1176 : :
# 1177 : : } // anon namespace
# 1178 : :
/** (try to) add transaction to memory pool with a specified acceptance time.
 *
 * Wraps AcceptSingleTransaction and then performs the caller-side cleanup:
 * uncaching coins pulled in by a rejected transaction, and a periodic flush
 * of the coins cache. Requires cs_main.
 */
static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool,
                                                      CChainState& active_chainstate,
                                                      const CTransactionRef &tx, int64_t nAcceptTime,
                                                      bool bypass_limits, bool test_accept)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    // Outpoints that AcceptSingleTransaction loaded into the coins cache and
    // that were not cached beforehand; used for cleanup on rejection below.
    std::vector<COutPoint> coins_to_uncache;
    MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache,
                                   test_accept, /* disallow_mempool_conflicts */ false };

    // Single-transaction acceptance only operates on the active chainstate.
    assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
    const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
    if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
        // Remove coins that were not present in the coins cache before calling
        // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
        // number of invalid transactions that attempt to overrun the in-memory coins cache
        // (`CCoinsViewCache::cacheCoins`).

        for (const COutPoint& hashTx : coins_to_uncache)
            active_chainstate.CoinsTip().Uncache(hashTx);
    }
    // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
    BlockValidationState state_dummy;
    active_chainstate.FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
    return result;
}
# 1206 : :
# 1207 : : MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx,
# 1208 : : bool bypass_limits, bool test_accept)
# 1209 : 31649 : {
# 1210 : 31649 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 1211 : 31649 : return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept);
# 1212 : 31649 : }
# 1213 : :
/** Validate a package of transactions against the mempool (dry-run only).
 *
 * Currently only supports test_accept (used by the testmempoolaccept RPC);
 * nothing is actually submitted to the mempool. Requires cs_main.
 */
PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTxMemPool& pool,
                                             const Package& package, bool test_accept)
{
    AssertLockHeld(cs_main);
    assert(test_accept); // Only allow package accept dry-runs (testmempoolaccept RPC).
    assert(!package.empty());
    assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));

    // Outpoints loaded into the coins cache during validation; uncached below.
    std::vector<COutPoint> coins_to_uncache;
    const CChainParams& chainparams = Params();
    MemPoolAccept::ATMPArgs args { chainparams, GetTime(), /* bypass_limits */ false, coins_to_uncache,
                                   test_accept, /* disallow_mempool_conflicts */ true };
    assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
    const PackageMempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);

    // Uncache coins pertaining to transactions that were not submitted to the mempool.
    // Ensure the cache is still within its size limits.
    for (const COutPoint& hashTx : coins_to_uncache) {
        active_chainstate.CoinsTip().Uncache(hashTx);
    }
    return result;
}
# 1236 : :
/** Look up a transaction by txid.
 *
 * Search order: the given block (read from disk) if block_index is non-null,
 * otherwise the mempool, then the optional transaction index (g_txindex).
 *
 * @param[in]  block_index  If non-null, only this block is searched.
 * @param[in]  mempool      Optional mempool to consult.
 * @param[in]  hash         Transaction id to find.
 * @param[out] hashBlock    Set to the containing block's hash when the tx is
 *                          found in a block (or via the txindex).
 * @return the transaction, or nullptr if not found.
 */
CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
{
    LOCK(cs_main);

    if (block_index) {
        CBlock block;
        if (ReadBlockFromDisk(block, block_index, consensusParams)) {
            for (const auto& tx : block.vtx) {
                if (tx->GetHash() == hash) {
                    hashBlock = block_index->GetBlockHash();
                    return tx;
                }
            }
        }
        // Note: when a block_index is given, only that block is searched.
        return nullptr;
    }
    if (mempool) {
        CTransactionRef ptx = mempool->get(hash);
        if (ptx) return ptx;
    }
    if (g_txindex) {
        CTransactionRef tx;
        if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
    }
    return nullptr;
}
# 1263 : :
# 1264 : : CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
# 1265 : 157094 : {
# 1266 : 157094 : int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
# 1267 : : // Force block reward to zero when right shift is undefined.
# 1268 [ + + ]: 157094 : if (halvings >= 64)
# 1269 : 1126 : return 0;
# 1270 : :
# 1271 : 155968 : CAmount nSubsidy = 50 * COIN;
# 1272 : : // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
# 1273 : 155968 : nSubsidy >>= halvings;
# 1274 : 155968 : return nSubsidy;
# 1275 : 155968 : }
# 1276 : :
// Construct the coins view stack: an on-disk leveldb view (m_dbview), wrapped
// by an error-catching view (m_catcherview). The in-memory cache layer is
// created separately via InitCache().
CoinsViews::CoinsViews(
    std::string ldb_name,
    size_t cache_size_bytes,
    bool in_memory,
    bool should_wipe) : m_dbview(
                            gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
                        m_catcherview(&m_dbview) {}
# 1284 : :
// Create the in-memory cache layer on top of the error-catching view.
// Split out from the constructor so callers control when the cache exists.
void CoinsViews::InitCache()
{
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
# 1289 : :
// A CChainState may be based on a UTXO snapshot; if so, from_snapshot_blockhash
// records the base block hash (and is used to disambiguate the leveldb name).
CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, std::optional<uint256> from_snapshot_blockhash)
    : m_mempool(mempool),
      m_blockman(blockman),
      m_from_snapshot_blockhash(from_snapshot_blockhash) {}
# 1294 : :
# 1295 : : void CChainState::InitCoinsDB(
# 1296 : : size_t cache_size_bytes,
# 1297 : : bool in_memory,
# 1298 : : bool should_wipe,
# 1299 : : std::string leveldb_name)
# 1300 : 794 : {
# 1301 [ + + ]: 794 : if (m_from_snapshot_blockhash) {
# 1302 : 9 : leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
# 1303 : 9 : }
# 1304 : :
# 1305 : 794 : m_coins_views = std::make_unique<CoinsViews>(
# 1306 : 794 : leveldb_name, cache_size_bytes, in_memory, should_wipe);
# 1307 : 794 : }
# 1308 : :
# 1309 : : void CChainState::InitCoinsCache(size_t cache_size_bytes)
# 1310 : 794 : {
# 1311 : 794 : assert(m_coins_views != nullptr);
# 1312 : 794 : m_coinstip_cache_size_bytes = cache_size_bytes;
# 1313 : 794 : m_coins_views->InitCache();
# 1314 : 794 : }
# 1315 : :
// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
// is a performance-related implementation detail. This function must be marked
// `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
// can call it.
//
bool CChainState::IsInitialBlockDownload() const
{
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    // Re-check under the lock: another thread may have latched meanwhile.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (m_chain.Tip() == nullptr)
        return true;
    // Not enough cumulative work yet to trust the tip.
    if (m_chain.Tip()->nChainWork < nMinimumChainWork)
        return true;
    // Tip is too old; we are still catching up.
    if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    // Latch: once we have left IBD we never re-enter it.
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
# 1342 : :
// Notify the UI of an alert and, if -alertnotify is configured, run the
// user-supplied shell command with %s replaced by the sanitized message.
static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
#if HAVE_SYSTEM
    std::string strCmd = gArgs.GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    // Run the command asynchronously; we do not wait for or check its result.
    std::thread t(runCommand, strCmd);
    t.detach(); // thread runs free
#endif
}
# 1362 : :
// Set or clear the "large invalid chain" warning depending on whether the best
// known invalid chain has substantially more work than our active chain.
void CChainState::CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    assert(std::addressof(::ChainstateActive()) == std::addressof(*this));

    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (IsInitialBlockDownload()) {
        return;
    }

    // Warn if an invalid chain is ahead of our tip by ~6 blocks' worth of work.
    if (pindexBestInvalid && pindexBestInvalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
        LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
        SetfLargeWorkInvalidChainFound(true);
    } else {
        SetfLargeWorkInvalidChainFound(false);
    }
}
# 1381 : :
// Called both upon regular invalid block discovery *and* InvalidateBlock
void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
{
    assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
    // Track the most-work invalid block seen so far (for fork warnings).
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;
    // If the best header was a descendant of the newly-invalid block, fall
    // back to the active tip as the best header.
    if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
        pindexBestHeader = m_chain.Tip();
    }

    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%f  date=%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = m_chain.Tip();
    assert (tip);
    LogPrintf("%s:  current best=%s  height=%d  log2_work=%f  date=%s\n", __func__,
      tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
      FormatISO8601DateTime(tip->GetBlockTime()));
    CheckForkWarningConditions();
}
# 1402 : :
// Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
// which does its own setBlockIndexCandidates management.
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) {
    // BLOCK_MUTATED means the block data didn't match its claimed hash; the
    // block itself may still be valid, so we don't mark the index failed.
    if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        m_blockman.m_failed_blocks.insert(pindex);
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
# 1414 : :
/** Apply the effects of a transaction to the UTXO set: spend its inputs
 *  (recording the spent coins in txundo) and add its outputs at nHeight. */
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent (coinbases have no real inputs to spend)
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        for (const CTxIn &txin : tx.vin) {
            txundo.vprevout.emplace_back();
            bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
            // Callers must have verified that every input exists and is unspent.
            assert(is_spent);
        }
    }
    // add outputs
    AddCoins(inputs, tx, nHeight);
}
# 1429 : :
# 1430 : : void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
# 1431 : 5226130 : {
# 1432 : 5226130 : CTxUndo txundo;
# 1433 : 5226130 : UpdateCoins(tx, inputs, txundo, nHeight);
# 1434 : 5226130 : }
# 1435 : :
# 1436 : 598338 : bool CScriptCheck::operator()() {
# 1437 : 598338 : const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
# 1438 : 598338 : const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
# 1439 : 598338 : return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
# 1440 : 598338 : }
# 1441 : :
// Return the height at which coins spent from `inputs` would be spent, i.e.
// one past the height of the view's best block. Requires cs_main.
int BlockManager::GetSpendHeight(const CCoinsViewCache& inputs)
{
    AssertLockHeld(cs_main);
    assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
    CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
    return pindexPrev->nHeight + 1;
}
# 1449 : :
# 1450 : :
// Cache of (wtxid, flags) combinations whose scripts have already verified
// successfully, keyed by a salted hash (see InitScriptExecutionCache).
static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
static CSHA256 g_scriptExecutionCacheHasher;
# 1453 : :
// Initialize the script execution cache: salt the hasher with random data so
// cache keys are unpredictable, and size the cache from -maxsigcachesize.
void InitScriptExecutionCache() {
    // Setup the salted hasher
    uint256 nonce = GetRandHash();
    // We want the nonce to be 64 bytes long to force the hasher to process
    // this chunk, which makes later hash computations more efficient. We
    // just write our 32-byte entropy twice to fill the 64 bytes.
    g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
    g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
    // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
    // setup_bytes creates the minimum possible cache (2 elements).
    size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
    size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
    LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
            (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
# 1469 : :
# 1470 : : /**
# 1471 : : * Check whether all of this transaction's input scripts succeed.
# 1472 : : *
# 1473 : : * This involves ECDSA signature checks so can be computationally intensive. This function should
# 1474 : : * only be called after the cheap sanity checks in CheckTxInputs passed.
# 1475 : : *
# 1476 : : * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
# 1477 : : * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
# 1478 : : * not pushed onto pvChecks/run.
# 1479 : : *
# 1480 : : * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
# 1481 : : * which are matched. This is useful for checking blocks where we will likely never need the cache
# 1482 : : * entry again.
# 1483 : : *
# 1484 : : * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking
# 1485 : : * callers should probably reset it to CONSENSUS in such cases.
# 1486 : : *
# 1487 : : * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
# 1488 : : */
bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
                       const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
                       bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
                       std::vector<CScriptCheck>* pvChecks)
{
    // Coinbase transactions have no real inputs, hence no scripts to check.
    if (tx.IsCoinBase()) return true;

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same
    // flags. Note that this assumes that the inputs provided are
    // correct (ie that the transaction hash which is in tx's prevouts
    // properly commits to the scriptPubKey in the inputs view of that
    // transaction).
    uint256 hashCacheEntry;
    CSHA256 hasher = g_scriptExecutionCacheHasher;
    hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
    AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
    if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
        return true;
    }

    // Lazily gather the outputs being spent, once per transaction.
    if (!txdata.m_spent_outputs_ready) {
        std::vector<CTxOut> spent_outputs;
        spent_outputs.reserve(tx.vin.size());

        for (const auto& txin : tx.vin) {
            const COutPoint& prevout = txin.prevout;
            const Coin& coin = inputs.AccessCoin(prevout);
            assert(!coin.IsSpent());
            spent_outputs.emplace_back(coin.out);
        }
        txdata.Init(tx, std::move(spent_outputs));
    }
    assert(txdata.m_spent_outputs.size() == tx.vin.size());

    for (unsigned int i = 0; i < tx.vin.size(); i++) {

        // We very carefully only pass in things to CScriptCheck which
        // are clearly committed to by tx' witness hash. This provides
        // a sanity check that our caching is not introducing consensus
        // failures through additional data in, eg, the coins being
        // spent being checked as a part of CScriptCheck.

        // Verify signature
        CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
        if (pvChecks) {
            // Defer: hand the check to the caller (parallel validation).
            pvChecks->push_back(CScriptCheck());
            check.swap(pvChecks->back());
        } else if (!check()) {
            if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                // Check whether the failure was caused by a
                // non-mandatory script verification check, such as
                // non-standard DER encodings or non-null dummy
                // arguments; if so, ensure we return NOT_STANDARD
                // instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and
                // non-upgraded nodes by banning CONSENSUS-failing
                // data providers.
                CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
                        flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
                if (check2())
                    return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
            }
            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
            // failures are the most serious case of validation
            // failures, we may need to consider using
            // RECENT_CONSENSUS_CHANGE for any script failure that
            // could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this
            // depends on the details of how net_processing handles
            // such errors).
            return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
        }
    }

    if (cacheFullScriptStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to
        // cache the result. Do so now.
        g_scriptExecutionCache.insert(hashCacheEntry);
    }

    return true;
}
# 1576 : :
// Abort the node (delegating to the non-state overload) and record the
// message in the validation state; always returns false for caller chaining.
bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
# 1582 : :
/**
 * Restore the UTXO in a Coin at a given COutPoint
 * @param undo The Coin to be restored.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return A DisconnectResult as an int
 */
int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    if (view.HaveCoin(out)) fClean = false; // overwriting transaction output

    if (undo.nHeight == 0) {
        // Missing undo metadata (height and coinbase). Older versions included this
        // information only in undo records for the last spend of a transactions'
        // outputs. This implies that it must be present for some other output of the same tx.
        const Coin& alternate = AccessByTxid(view, out.hash);
        if (!alternate.IsSpent()) {
            // Copy the metadata from a sibling output of the same transaction.
            undo.nHeight = alternate.nHeight;
            undo.fCoinBase = alternate.fCoinBase;
        } else {
            return DISCONNECT_FAILED; // adding output for transaction without known metadata
        }
    }
    // If the coin already exists as an unspent coin in the cache, then the
    // possible_overwrite parameter to AddCoin must be set to true. We have
    // already checked whether an unspent coin exists above using HaveCoin, so
    // we don't need to guess. When fClean is false, an unspent coin already
    // existed and it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
# 1617 : :
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
 *  When FAILED is returned, view is left in an indeterminate state. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
    bool fClean = true;

    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DISCONNECT_FAILED;
    }

    // Every transaction except the coinbase has an undo record.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DISCONNECT_FAILED;
    }

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();
        bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
                COutPoint out(hash, o);
                Coin coin;
                bool is_spent = view.SpendCoin(out, &coin);
                if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
                    fClean = false; // transaction output mismatch
                }
            }
        }

        // restore inputs
        if (i > 0) { // not coinbases
            CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size()) {
                error("DisconnectBlock(): transaction and undo data inconsistent");
                return DISCONNECT_FAILED;
            }
            // Inputs are restored in reverse order, mirroring how they were spent.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
                if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
                fClean = fClean && res != DISCONNECT_UNCLEAN;
            }
            // At this point, all of txundo.vprevout should have been moved out.
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
# 1676 : :
// Queue of pending script checks shared by the worker threads (batch size 128).
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

// Start the script-verification worker threads servicing scriptcheckqueue.
void StartScriptCheckWorkerThreads(int threads_num)
{
    scriptcheckqueue.StartWorkerThreads(threads_num);
}

// Stop and join the script-verification worker threads.
void StopScriptCheckWorkerThreads()
{
    scriptcheckqueue.StopWorkerThreads();
}
# 1688 : :
# 1689 : : VersionBitsCache versionbitscache GUARDED_BY(cs_main);
# 1690 : :
# 1691 : : int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
# 1692 : 70477 : {
# 1693 : 70477 : LOCK(cs_main);
# 1694 : 70477 : int32_t nVersion = VERSIONBITS_TOP_BITS;
# 1695 : :
# 1696 [ + + ]: 211431 : for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
# 1697 : 140954 : ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
# 1698 [ + + ][ + + ]: 140954 : if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
# 1699 : 44171 : nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
# 1700 : 44171 : }
# 1701 : 140954 : }
# 1702 : :
# 1703 : 70477 : return nVersion;
# 1704 : 70477 : }
# 1705 : :
/**
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
 */
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    // The version bit this checker watches.
    int bit;

public:
    explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    // Always-active window covering the whole chain.
    int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
    int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }

    // True when a block signals this bit even though no known deployment we
    // would signal for (per ComputeBlockVersion) uses it.
    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
    {
        return pindex->nHeight >= params.MinBIP9WarningHeight &&
               ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};

// Per-bit threshold state caches for the unknown-versionbits warning logic.
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
# 1732 : :
// 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
// mainnet. We no longer need to support disabling the segwit deployment
// except for testing purposes, due to limitations of the functional test
// environment. See test/functional/p2p-segwit.py.
static bool IsScriptWitnessEnabled(const Consensus::Params& params)
{
    // A SegwitHeight of INT_MAX is the sentinel for "segwit disabled".
    return params.SegwitHeight != std::numeric_limits<int>::max();
}
# 1741 : :
// Determine the script verification flags to apply when connecting this block,
// based on which soft forks are active at its height. Requires cs_main.
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);

    unsigned int flags = SCRIPT_VERIFY_NONE;

    // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
    // retroactively applied to testnet)
    // However, only one historical block violated the P2SH rules (on both
    // mainnet and testnet), so for simplicity, always leave P2SH
    // on except for the one violating block.
    if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
        pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
        *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
    {
        flags |= SCRIPT_VERIFY_P2SH;
    }

    // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
    // deployment is defined).
    if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
        flags |= SCRIPT_VERIFY_WITNESS;
    }

    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= consensusparams.BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= consensusparams.BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
    if (pindex->nHeight >= consensusparams.CSVHeight) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
    }

    // Start enforcing Taproot using versionbits logic.
    if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE) {
        flags |= SCRIPT_VERIFY_TAPROOT;
    }

    // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
    if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }

    return flags;
}
# 1792 : :
# 1793 : :
# 1794 : :
# 1795 : : static int64_t nTimeCheck = 0;
# 1796 : : static int64_t nTimeForks = 0;
# 1797 : : static int64_t nTimeVerify = 0;
# 1798 : : static int64_t nTimeConnect = 0;
# 1799 : : static int64_t nTimeIndex = 0;
# 1800 : : static int64_t nTimeCallbacks = 0;
# 1801 : : static int64_t nTimeTotal = 0;
# 1802 : : static int64_t nBlocksTotal = 0;
# 1803 : :
# 1804 : : /** Apply the effects of this block (with given index) on the UTXO set represented by coins.
# 1805 : : * Validity checks that depend on the UTXO set are also done; ConnectBlock()
# 1806 : : * can fail if those validity checks fail (among other reasons). */
# 1807 : : bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
# 1808 : : CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
# 1809 : 101102 : {
# 1810 : 101102 : AssertLockHeld(cs_main);
# 1811 : 101102 : assert(pindex);
# 1812 : 101102 : assert(*pindex->phashBlock == block.GetHash());
# 1813 : 101102 : int64_t nTimeStart = GetTimeMicros();
# 1814 : :
# 1815 : : // Check it again in case a previous version let a bad block in
# 1816 : : // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
# 1817 : : // ContextualCheckBlockHeader() here. This means that if we add a new
# 1818 : : // consensus rule that is enforced in one of those two functions, then we
# 1819 : : // may have let in a block that violates the rule prior to updating the
# 1820 : : // software, and we would NOT be enforcing the rule here. Fully solving
# 1821 : : // upgrade from one software version to the next after a consensus rule
# 1822 : : // change is potentially tricky and issue-specific (see RewindBlockIndex()
# 1823 : : // for one general approach that was used for BIP 141 deployment).
# 1824 : : // Also, currently the rule against blocks more than 2 hours in the future
# 1825 : : // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
# 1826 : : // re-enforce that rule here (at least until we make it impossible for
# 1827 : : // GetAdjustedTime() to go backward).
# 1828 [ - + ]: 101102 : if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
# 1829 [ # # ]: 0 : if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
# 1830 : : // We don't write down blocks to disk if they may have been
# 1831 : : // corrupted, so this should be impossible unless we're having hardware
# 1832 : : // problems.
# 1833 : 0 : return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
# 1834 : 0 : }
# 1835 : 0 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 1836 : 0 : }
# 1837 : :
# 1838 : : // verify that the view's current state corresponds to the previous block
# 1839 [ + + ]: 101102 : uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
# 1840 : 101102 : assert(hashPrevBlock == view.GetBestBlock());
# 1841 : :
# 1842 : 101102 : nBlocksTotal++;
# 1843 : :
# 1844 : : // Special case for the genesis block, skipping connection of its transactions
# 1845 : : // (its coinbase is unspendable)
# 1846 [ + + ]: 101102 : if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
# 1847 [ + - ]: 404 : if (!fJustCheck)
# 1848 : 404 : view.SetBestBlock(pindex->GetBlockHash());
# 1849 : 404 : return true;
# 1850 : 404 : }
# 1851 : :
# 1852 : 100698 : bool fScriptChecks = true;
# 1853 [ + + ]: 100698 : if (!hashAssumeValid.IsNull()) {
# 1854 : : // We've been configured with the hash of a block which has been externally verified to have a valid history.
# 1855 : : // A suitable default value is included with the software and updated from time to time. Because validity
# 1856 : : // relative to a piece of software is an objective fact these defaults can be easily reviewed.
# 1857 : : // This setting doesn't force the selection of any particular chain but makes validating some faster by
# 1858 : : // effectively caching the result of part of the verification.
# 1859 : 2568 : BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
# 1860 [ + + ]: 2568 : if (it != m_blockman.m_block_index.end()) {
# 1861 [ + + ]: 2304 : if (it->second->GetAncestor(pindex->nHeight) == pindex &&
# 1862 [ + - ]: 2304 : pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
# 1863 [ + - ]: 2304 : pindexBestHeader->nChainWork >= nMinimumChainWork) {
# 1864 : : // This block is a member of the assumed verified chain and an ancestor of the best header.
# 1865 : : // Script verification is skipped when connecting blocks under the
# 1866 : : // assumevalid block. Assuming the assumevalid block is valid this
# 1867 : : // is safe because block merkle hashes are still computed and checked,
# 1868 : : // Of course, if an assumed valid block is invalid due to false scriptSigs
# 1869 : : // this optimization would allow an invalid chain to be accepted.
# 1870 : : // The equivalent time check discourages hash power from extorting the network via DOS attack
# 1871 : : // into accepting an invalid block through telling users they must manually set assumevalid.
# 1872 : : // Requiring a software change or burying the invalid block, regardless of the setting, makes
# 1873 : : // it hard to hide the implication of the demand. This also avoids having release candidates
# 1874 : : // that are hardly doing any signature verification at all in testing without having to
# 1875 : : // artificially set the default assumed verified block further back.
# 1876 : : // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
# 1877 : : // least as good as the expected chain.
# 1878 : 204 : fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
# 1879 : 204 : }
# 1880 : 2304 : }
# 1881 : 2568 : }
# 1882 : :
# 1883 : 100698 : int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
# 1884 [ + - ]: 100698 : LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
# 1885 : :
# 1886 : : // Do not allow blocks that contain transactions which 'overwrite' older transactions,
# 1887 : : // unless those are already completely spent.
# 1888 : : // If such overwrites are allowed, coinbases and transactions depending upon those
# 1889 : : // can be duplicated to remove the ability to spend the first instance -- even after
# 1890 : : // being sent to another address.
# 1891 : : // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
# 1892 : : // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
# 1893 : : // already refuses previously-known transaction ids entirely.
# 1894 : : // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
# 1895 : : // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
# 1896 : : // two in the chain that violate it. This prevents exploiting the issue against nodes during their
# 1897 : : // initial block download.
# 1898 [ - + ][ # # ]: 100698 : bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
# 1899 [ - + ][ # # ]: 100698 : (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
# 1900 : :
# 1901 : : // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
# 1902 : : // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
# 1903 : : // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
# 1904 : : // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
# 1905 : : // duplicate transactions descending from the known pairs either.
# 1906 : : // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
# 1907 : :
# 1908 : : // BIP34 requires that a block at height X (block X) has its coinbase
# 1909 : : // scriptSig start with a CScriptNum of X (indicated height X). The above
# 1910 : : // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
# 1911 : : // case that there is a block X before the BIP34 height of 227,931 which has
# 1912 : : // an indicated height Y where Y is greater than X. The coinbase for block
# 1913 : : // X would also be a valid coinbase for block Y, which could be a BIP30
# 1914 : : // violation. An exhaustive search of all mainnet coinbases before the
# 1915 : : // BIP34 height which have an indicated height greater than the block height
# 1916 : : // reveals many occurrences. The 3 lowest indicated heights found are
# 1917 : : // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
# 1918 : : // heights would be the first opportunity for BIP30 to be violated.
# 1919 : :
# 1920 : : // The search reveals a great many blocks which have an indicated height
# 1921 : : // greater than 1,983,702, so we simply remove the optimization to skip
# 1922 : : // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
# 1923 : : // that block in another 25 years or so, we should take advantage of a
# 1924 : : // future consensus change to do a new and improved version of BIP34 that
# 1925 : : // will actually prevent ever creating any duplicate coinbases in the
# 1926 : : // future.
# 1927 : 100698 : static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
# 1928 : :
# 1929 : : // There is no potential to create a duplicate coinbase at block 209,921
# 1930 : : // because this is still before the BIP34 height and so explicit BIP30
# 1931 : : // checking is still active.
# 1932 : :
# 1933 : : // The final case is block 176,684 which has an indicated height of
# 1934 : : // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
# 1935 : : // before block 490,897 so there was not much opportunity to address this
# 1936 : : // case other than to carefully analyze it and determine it would not be a
# 1937 : : // problem. Block 490,897 was, in fact, mined with a different coinbase than
# 1938 : : // block 176,684, but it is important to note that even if it hadn't been or
# 1939 : : // is remined on an alternate fork with a duplicate coinbase, we would still
# 1940 : : // not run into a BIP30 violation. This is because the coinbase for 176,684
# 1941 : : // is spent in block 185,956 in transaction
# 1942 : : // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
# 1943 : : // spending transaction can't be duplicated because it also spends coinbase
# 1944 : : // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
# 1945 : : // coinbase has an indicated height of over 4.2 billion, and wouldn't be
# 1946 : : // duplicatable until that height, and it's currently impossible to create a
# 1947 : : // chain that long. Nevertheless we may wish to consider a future soft fork
# 1948 : : // which retroactively prevents block 490,897 from creating a duplicate
# 1949 : : // coinbase. The two historical BIP30 violations often provide a confusing
# 1950 : : // edge case when manipulating the UTXO and it would be simpler not to have
# 1951 : : // another edge case to deal with.
# 1952 : :
# 1953 : : // testnet3 has no blocks before the BIP34 height with indicated heights
# 1954 : : // post BIP34 before approximately height 486,000,000 and presumably will
# 1955 : : // be reset before it reaches block 1,983,702 and starts doing unnecessary
# 1956 : : // BIP30 checking again.
# 1957 : 100698 : assert(pindex->pprev);
# 1958 : 100698 : CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
# 1959 : : //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
# 1960 [ + - ][ + + ]: 100698 : fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
# [ + - ]
# 1961 : :
# 1962 : : // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
# 1963 : : // consensus change that ensures coinbases at those heights can not
# 1964 : : // duplicate earlier coinbases.
# 1965 [ + - ][ # # ]: 100698 : if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
# 1966 [ + + ]: 169691 : for (const auto& tx : block.vtx) {
# 1967 [ + + ]: 698280 : for (size_t o = 0; o < tx->vout.size(); o++) {
# 1968 [ + + ]: 528590 : if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
# 1969 : 1 : LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
# 1970 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
# 1971 : 1 : }
# 1972 : 528590 : }
# 1973 : 169691 : }
# 1974 : 100698 : }
# 1975 : :
# 1976 : : // Start enforcing BIP68 (sequence locks)
# 1977 : 100698 : int nLockTimeFlags = 0;
# 1978 [ + + ]: 100697 : if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
# 1979 : 31728 : nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
# 1980 : 31728 : }
# 1981 : :
# 1982 : : // Get the script flags for this block
# 1983 : 100697 : unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
# 1984 : :
# 1985 : 100697 : int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
# 1986 [ + - ]: 100697 : LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
# 1987 : :
# 1988 : 100697 : CBlockUndo blockundo;
# 1989 : :
# 1990 : : // Precomputed transaction data pointers must not be invalidated
# 1991 : : // until after `control` has run the script checks (potentially
# 1992 : : // in multiple threads). Preallocate the vector size so a new allocation
# 1993 : : // doesn't invalidate pointers into the vector, and keep txsdata in scope
# 1994 : : // for as long as `control`.
# 1995 [ + + ][ + + ]: 100697 : CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
# 1996 : 100697 : std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
# 1997 : :
# 1998 : 100697 : std::vector<int> prevheights;
# 1999 : 100697 : CAmount nFees = 0;
# 2000 : 100697 : int nInputs = 0;
# 2001 : 100697 : int64_t nSigOpsCost = 0;
# 2002 : 100697 : blockundo.vtxundo.reserve(block.vtx.size() - 1);
# 2003 [ + + ]: 267662 : for (unsigned int i = 0; i < block.vtx.size(); i++)
# 2004 : 169686 : {
# 2005 : 169686 : const CTransaction &tx = *(block.vtx[i]);
# 2006 : :
# 2007 : 169686 : nInputs += tx.vin.size();
# 2008 : :
# 2009 [ + + ]: 169686 : if (!tx.IsCoinBase())
# 2010 : 68989 : {
# 2011 : 68989 : CAmount txfee = 0;
# 2012 : 68989 : TxValidationState tx_state;
# 2013 [ + + ]: 68989 : if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
# 2014 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2015 : 32 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2016 : 32 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2017 : 32 : return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
# 2018 : 32 : }
# 2019 : 68957 : nFees += txfee;
# 2020 [ - + ]: 68957 : if (!MoneyRange(nFees)) {
# 2021 : 0 : LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
# 2022 : 0 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
# 2023 : 0 : }
# 2024 : :
# 2025 : : // Check that transaction is BIP68 final
# 2026 : : // BIP68 lock checks (as opposed to nLockTime checks) must
# 2027 : : // be in ConnectBlock because they require the UTXO set
# 2028 : 68957 : prevheights.resize(tx.vin.size());
# 2029 [ + + ]: 191342 : for (size_t j = 0; j < tx.vin.size(); j++) {
# 2030 : 122385 : prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
# 2031 : 122385 : }
# 2032 : :
# 2033 [ + + ]: 68957 : if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
# 2034 : 12 : LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
# 2035 : 12 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
# 2036 : 12 : }
# 2037 : 169642 : }
# 2038 : :
# 2039 : : // GetTransactionSigOpCost counts 3 types of sigops:
# 2040 : : // * legacy (always)
# 2041 : : // * p2sh (when P2SH enabled in flags and excludes coinbase)
# 2042 : : // * witness (when witness enabled in flags and excludes coinbase)
# 2043 : 169642 : nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
# 2044 [ + + ]: 169642 : if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
# 2045 : 5 : LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
# 2046 : 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
# 2047 : 5 : }
# 2048 : :
# 2049 [ + + ]: 169637 : if (!tx.IsCoinBase())
# 2050 : 68940 : {
# 2051 : 68940 : std::vector<CScriptCheck> vChecks;
# 2052 : 68940 : bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
# 2053 : 68940 : TxValidationState tx_state;
# 2054 [ + + ][ + + ]: 68940 : if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
# [ + + ]
# 2055 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2056 : 2672 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2057 : 2672 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2058 : 2672 : return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
# 2059 : 2672 : tx.GetHash().ToString(), state.ToString());
# 2060 : 2672 : }
# 2061 : 66268 : control.Add(vChecks);
# 2062 : 66268 : }
# 2063 : :
# 2064 : 169637 : CTxUndo undoDummy;
# 2065 [ + + ]: 166965 : if (i > 0) {
# 2066 : 66268 : blockundo.vtxundo.push_back(CTxUndo());
# 2067 : 66268 : }
# 2068 [ + + ]: 166965 : UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
# 2069 : 166965 : }
# 2070 : 100697 : int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
# 2071 [ + - ][ + + ]: 97976 : LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
# 2072 : :
# 2073 : 97976 : CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
# 2074 [ + + ]: 97976 : if (block.vtx[0]->GetValueOut() > blockReward) {
# 2075 : 5 : LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
# 2076 : 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
# 2077 : 5 : }
# 2078 : :
# 2079 [ + + ]: 97971 : if (!control.Wait()) {
# 2080 : 44 : LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
# 2081 : 44 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
# 2082 : 44 : }
# 2083 : 97927 : int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
# 2084 [ + - ][ + + ]: 97927 : LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
# 2085 : :
# 2086 [ + + ]: 97927 : if (fJustCheck)
# 2087 : 29961 : return true;
# 2088 : :
# 2089 [ - + ]: 67966 : if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
# 2090 : 0 : return false;
# 2091 : :
# 2092 [ + + ]: 67966 : if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
# 2093 : 66218 : pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
# 2094 : 66218 : setDirtyBlockIndex.insert(pindex);
# 2095 : 66218 : }
# 2096 : :
# 2097 : 67966 : assert(pindex->phashBlock);
# 2098 : : // add this block to the view's block chain
# 2099 : 67966 : view.SetBestBlock(pindex->GetBlockHash());
# 2100 : :
# 2101 : 67966 : int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
# 2102 [ + - ]: 67966 : LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
# 2103 : :
# 2104 : 67966 : int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
# 2105 [ + - ]: 67966 : LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
# 2106 : :
# 2107 : 67966 : return true;
# 2108 : 67966 : }
# 2109 : :
# 2110 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
# 2111 : 237089 : {
# 2112 : 237089 : return this->GetCoinsCacheSizeState(
# 2113 : 237089 : tx_pool,
# 2114 : 237089 : m_coinstip_cache_size_bytes,
# 2115 : 237089 : gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
# 2116 : 237089 : }
# 2117 : :
# 2118 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
# 2119 : : const CTxMemPool* tx_pool,
# 2120 : : size_t max_coins_cache_size_bytes,
# 2121 : : size_t max_mempool_size_bytes)
# 2122 : 237091 : {
# 2123 [ + - ]: 237091 : const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
# 2124 : 237091 : int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
# 2125 : 237091 : int64_t nTotalSpace =
# 2126 : 237091 : max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
# 2127 : :
# 2128 : : //! No need to periodic flush if at least this much space still available.
# 2129 : 237091 : static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
# 2130 : 237091 : int64_t large_threshold =
# 2131 : 237091 : std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
# 2132 : :
# 2133 [ + + ]: 237091 : if (cacheSize > nTotalSpace) {
# 2134 : 1 : LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
# 2135 : 1 : return CoinsCacheSizeState::CRITICAL;
# 2136 [ - + ]: 237090 : } else if (cacheSize > large_threshold) {
# 2137 : 0 : return CoinsCacheSizeState::LARGE;
# 2138 : 0 : }
# 2139 : 237090 : return CoinsCacheSizeState::OK;
# 2140 : 237090 : }
# 2141 : :
# 2142 : : bool CChainState::FlushStateToDisk(
# 2143 : : const CChainParams& chainparams,
# 2144 : : BlockValidationState &state,
# 2145 : : FlushStateMode mode,
# 2146 : : int nManualPruneHeight)
# 2147 : 237089 : {
# 2148 : 237089 : LOCK(cs_main);
# 2149 : 237089 : assert(this->CanFlushToDisk());
# 2150 : 237089 : static std::chrono::microseconds nLastWrite{0};
# 2151 : 237089 : static std::chrono::microseconds nLastFlush{0};
# 2152 : 237089 : std::set<int> setFilesToPrune;
# 2153 : 237089 : bool full_flush_completed = false;
# 2154 : :
# 2155 : 237089 : const size_t coins_count = CoinsTip().GetCacheSize();
# 2156 : 237089 : const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
# 2157 : :
# 2158 : 237089 : try {
# 2159 : 237089 : {
# 2160 : 237089 : bool fFlushForPrune = false;
# 2161 : 237089 : bool fDoFullFlush = false;
# 2162 : :
# 2163 : 237089 : CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool);
# 2164 : 237089 : LOCK(cs_LastBlockFile);
# 2165 [ + + ][ + + ]: 237089 : if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
# [ + + ][ + - ]
# 2166 : : // make sure we don't prune above the blockfilterindexes bestblocks
# 2167 : : // pruning is height-based
# 2168 : 41 : int last_prune = m_chain.Height(); // last height we can prune
# 2169 : 41 : ForEachBlockFilterIndex([&](BlockFilterIndex& index) {
# 2170 : 13 : last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
# 2171 : 13 : });
# 2172 : :
# 2173 [ + + ]: 41 : if (nManualPruneHeight > 0) {
# 2174 : 2 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
# 2175 : :
# 2176 : 2 : m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
# 2177 : 39 : } else {
# 2178 : 39 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
# 2179 : :
# 2180 : 39 : m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
# 2181 : 39 : fCheckForPruning = false;
# 2182 : 39 : }
# 2183 [ + + ]: 41 : if (!setFilesToPrune.empty()) {
# 2184 : 2 : fFlushForPrune = true;
# 2185 [ + + ]: 2 : if (!fHavePruned) {
# 2186 : 1 : pblocktree->WriteFlag("prunedblockfiles", true);
# 2187 : 1 : fHavePruned = true;
# 2188 : 1 : }
# 2189 : 2 : }
# 2190 : 41 : }
# 2191 : 237089 : const auto nNow = GetTime<std::chrono::microseconds>();
# 2192 : : // Avoid writing/flushing immediately after startup.
# 2193 [ + + ]: 237089 : if (nLastWrite.count() == 0) {
# 2194 : 628 : nLastWrite = nNow;
# 2195 : 628 : }
# 2196 [ + + ]: 237089 : if (nLastFlush.count() == 0) {
# 2197 : 628 : nLastFlush = nNow;
# 2198 : 628 : }
# 2199 : : // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
# 2200 [ + + ][ - + ]: 237089 : bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
# 2201 : : // The cache is over the limit, we have to write now.
# 2202 [ + + ][ - + ]: 237089 : bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
# 2203 : : // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
# 2204 [ + + ][ + + ]: 237089 : bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
# 2205 : : // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
# 2206 [ + + ][ + + ]: 237089 : bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
# 2207 : : // Combine all conditions that result in a full cache flush.
# 2208 [ + + ][ - + ]: 237089 : fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
# [ - + ][ + + ]
# [ + + ]
# 2209 : : // Write blocks and block index to disk.
# 2210 [ + + ][ + + ]: 237089 : if (fDoFullFlush || fPeriodicWrite) {
# 2211 : : // Depend on nMinDiskSpace to ensure we can write block index
# 2212 [ - + ]: 1588 : if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
# 2213 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2214 : 0 : }
# 2215 : 1588 : {
# 2216 : 1588 : LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
# 2217 : :
# 2218 : : // First make sure all block and undo data is flushed to disk.
# 2219 : 1588 : FlushBlockFile();
# 2220 : 1588 : }
# 2221 : :
# 2222 : : // Then update all block file information (which may refer to block and undo files).
# 2223 : 1588 : {
# 2224 : 1588 : LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
# 2225 : :
# 2226 : 1588 : std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
# 2227 : 1588 : vFiles.reserve(setDirtyFileInfo.size());
# 2228 [ + + ]: 2071 : for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
# 2229 : 483 : vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
# 2230 : 483 : setDirtyFileInfo.erase(it++);
# 2231 : 483 : }
# 2232 : 1588 : std::vector<const CBlockIndex*> vBlocks;
# 2233 : 1588 : vBlocks.reserve(setDirtyBlockIndex.size());
# 2234 [ + + ]: 69979 : for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
# 2235 : 68391 : vBlocks.push_back(*it);
# 2236 : 68391 : setDirtyBlockIndex.erase(it++);
# 2237 : 68391 : }
# 2238 [ - + ]: 1588 : if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
# 2239 : 0 : return AbortNode(state, "Failed to write to block index database");
# 2240 : 0 : }
# 2241 : 1588 : }
# 2242 : : // Finally remove any pruned files
# 2243 [ + + ]: 1588 : if (fFlushForPrune) {
# 2244 : 2 : LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
# 2245 : :
# 2246 : 2 : UnlinkPrunedFiles(setFilesToPrune);
# 2247 : 2 : }
# 2248 : 1588 : nLastWrite = nNow;
# 2249 : 1588 : }
# 2250 : : // Flush best chain related state. This can only be done if the blocks / block index write was also done.
# 2251 [ + + ][ + + ]: 237089 : if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
# [ + - ]
# 2252 : 1579 : LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
# 2253 : 1579 : coins_count, coins_mem_usage / 1000));
# 2254 : :
# 2255 : : // Typical Coin structures on disk are around 48 bytes in size.
# 2256 : : // Pushing a new one to the database can cause it to be written
# 2257 : : // twice (once in the log, and once in the tables). This is already
# 2258 : : // an overestimation, as most will delete an existing entry or
# 2259 : : // overwrite one. Still, use a conservative safety factor of 2.
# 2260 [ - + ]: 1579 : if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
# 2261 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2262 : 0 : }
# 2263 : : // Flush the chainstate (which may refer to block index entries).
# 2264 [ - + ]: 1579 : if (!CoinsTip().Flush())
# 2265 : 0 : return AbortNode(state, "Failed to write to coin database");
# 2266 : 1579 : nLastFlush = nNow;
# 2267 : 1579 : full_flush_completed = true;
# 2268 : 1579 : }
# 2269 : 237089 : }
# 2270 [ + + ]: 237089 : if (full_flush_completed) {
# 2271 : : // Update best block in wallet (so we can detect restored wallets).
# 2272 : 1579 : GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
# 2273 : 1579 : }
# 2274 : 237089 : } catch (const std::runtime_error& e) {
# 2275 : 0 : return AbortNode(state, std::string("System error while flushing: ") + e.what());
# 2276 : 0 : }
# 2277 : 237089 : return true;
# 2278 : 237089 : }
# 2279 : :
# 2280 : 1561 : void CChainState::ForceFlushStateToDisk() {
# 2281 : 1561 : BlockValidationState state;
# 2282 : 1561 : const CChainParams& chainparams = Params();
# 2283 [ - + ]: 1561 : if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
# 2284 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2285 : 0 : }
# 2286 : 1561 : }
# 2287 : :
# 2288 : 7 : void CChainState::PruneAndFlush() {
# 2289 : 7 : BlockValidationState state;
# 2290 : 7 : fCheckForPruning = true;
# 2291 : 7 : const CChainParams& chainparams = Params();
# 2292 : :
# 2293 [ - + ]: 7 : if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
# 2294 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2295 : 0 : }
# 2296 : 7 : }
# 2297 : :
# 2298 : : static void DoWarning(const bilingual_str& warning)
# 2299 : 4 : {
# 2300 : 4 : static bool fWarned = false;
# 2301 : 4 : SetMiscWarning(warning);
# 2302 [ + + ]: 4 : if (!fWarned) {
# 2303 : 2 : AlertNotify(warning.original);
# 2304 : 2 : fWarned = true;
# 2305 : 2 : }
# 2306 : 4 : }
# 2307 : :
# 2308 : : /** Private helper function that concatenates warning messages. */
# 2309 : : static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
# 2310 : 144 : {
# 2311 [ - + ]: 144 : if (!res.empty()) res += Untranslated(", ");
# 2312 : 144 : res += warn;
# 2313 : 144 : }
# 2314 : :
# 2315 : : /** Check warning conditions and do some notifications on new chain tip set. */
# 2316 : : static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams, CChainState& active_chainstate)
# 2317 : : EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 2318 : 72878 : {
# 2319 : : // New best block
# 2320 : 72878 : mempool.AddTransactionsUpdated(1);
# 2321 : :
# 2322 : 72878 : {
# 2323 : 72878 : LOCK(g_best_block_mutex);
# 2324 : 72878 : g_best_block = pindexNew->GetBlockHash();
# 2325 : 72878 : g_best_block_cv.notify_all();
# 2326 : 72878 : }
# 2327 : :
# 2328 : 72878 : bilingual_str warning_messages;
# 2329 : 72878 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 2330 [ + + ]: 72878 : if (!active_chainstate.IsInitialBlockDownload()) {
# 2331 : 66404 : const CBlockIndex* pindex = pindexNew;
# 2332 [ + + ]: 1992120 : for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
# 2333 : 1925716 : WarningBitsConditionChecker checker(bit);
# 2334 : 1925716 : ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
# 2335 [ + + ][ + + ]: 1925716 : if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
# 2336 : 148 : const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
# 2337 [ + + ]: 148 : if (state == ThresholdState::ACTIVE) {
# 2338 : 4 : DoWarning(warning);
# 2339 : 144 : } else {
# 2340 : 144 : AppendWarning(warning_messages, warning);
# 2341 : 144 : }
# 2342 : 148 : }
# 2343 : 1925716 : }
# 2344 : 66404 : }
# 2345 : 72878 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 2346 [ + + ]: 72878 : LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
# 2347 : 72878 : pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
# 2348 : 72878 : log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
# 2349 : 72878 : FormatISO8601DateTime(pindexNew->GetBlockTime()),
# 2350 : 72878 : GuessVerificationProgress(chainParams.TxData(), pindexNew), active_chainstate.CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), active_chainstate.CoinsTip().GetCacheSize(),
# 2351 : 72878 : !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
# 2352 : 72878 : }
# 2353 : :
# 2354 : : /** Disconnect m_chain's tip.
# 2355 : : * After calling, the mempool will be in an inconsistent state, with
# 2356 : : * transactions from disconnected blocks being added to disconnectpool. You
# 2357 : : * should make the mempool consistent again by calling UpdateMempoolForReorg.
# 2358 : : * with cs_main held.
# 2359 : : *
# 2360 : : * If disconnectpool is nullptr, then no disconnected transactions are added to
# 2361 : : * disconnectpool (note that the caller is responsible for mempool consistency
# 2362 : : * in any case).
# 2363 : : */
# 2364 : : bool CChainState::DisconnectTip(BlockValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool)
# 2365 : 4717 : {
# 2366 : 4717 : AssertLockHeld(cs_main);
# 2367 : 4717 : AssertLockHeld(m_mempool.cs);
# 2368 : :
# 2369 : 4717 : CBlockIndex *pindexDelete = m_chain.Tip();
# 2370 : 4717 : assert(pindexDelete);
# 2371 : : // Read block from disk.
# 2372 : 4717 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
# 2373 : 4717 : CBlock& block = *pblock;
# 2374 [ - + ]: 4717 : if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
# 2375 : 0 : return error("DisconnectTip(): Failed to read block");
# 2376 : : // Apply the block atomically to the chain state.
# 2377 : 4717 : int64_t nStart = GetTimeMicros();
# 2378 : 4717 : {
# 2379 : 4717 : CCoinsViewCache view(&CoinsTip());
# 2380 : 4717 : assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
# 2381 [ + + ]: 4717 : if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
# 2382 : 1 : return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
# 2383 : 4716 : bool flushed = view.Flush();
# 2384 : 4716 : assert(flushed);
# 2385 : 4716 : }
# 2386 [ + - ]: 4716 : LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
# 2387 : : // Write the chain state to disk, if necessary.
# 2388 [ - + ]: 4716 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
# 2389 : 0 : return false;
# 2390 : :
# 2391 [ + - ]: 4716 : if (disconnectpool) {
# 2392 : : // Save transactions to re-add to mempool at end of reorg
# 2393 [ + + ]: 17053 : for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
# 2394 : 12337 : disconnectpool->addTransaction(*it);
# 2395 : 12337 : }
# 2396 [ + + ]: 7921 : while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
# 2397 : : // Drop the earliest entry, and remove its children from the mempool.
# 2398 : 3205 : auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
# 2399 : 3205 : m_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
# 2400 : 3205 : disconnectpool->removeEntry(it);
# 2401 : 3205 : }
# 2402 : 4716 : }
# 2403 : :
# 2404 : 4716 : m_chain.SetTip(pindexDelete->pprev);
# 2405 : :
# 2406 : 4716 : UpdateTip(m_mempool, pindexDelete->pprev, chainparams, *this);
# 2407 : : // Let wallets know transactions went from 1-confirmed to
# 2408 : : // 0-confirmed or conflicted:
# 2409 : 4716 : GetMainSignals().BlockDisconnected(pblock, pindexDelete);
# 2410 : 4716 : return true;
# 2411 : 4716 : }
# 2412 : :
# 2413 : : static int64_t nTimeReadFromDisk = 0;
# 2414 : : static int64_t nTimeConnectTotal = 0;
# 2415 : : static int64_t nTimeFlush = 0;
# 2416 : : static int64_t nTimeChainState = 0;
# 2417 : : static int64_t nTimePostConnect = 0;
# 2418 : :
# 2419 : : struct PerBlockConnectTrace {
# 2420 : : CBlockIndex* pindex = nullptr;
# 2421 : : std::shared_ptr<const CBlock> pblock;
# 2422 : 152991 : PerBlockConnectTrace() {}
# 2423 : : };
# 2424 : : /**
# 2425 : : * Used to track blocks whose transactions were applied to the UTXO state as a
# 2426 : : * part of a single ActivateBestChainStep call.
# 2427 : : *
# 2428 : : * This class is single-use, once you call GetBlocksConnected() you have to throw
# 2429 : : * it away and make a new one.
# 2430 : : */
# 2431 : : class ConnectTrace {
# 2432 : : private:
# 2433 : : std::vector<PerBlockConnectTrace> blocksConnected;
# 2434 : :
# 2435 : : public:
# 2436 : 84829 : explicit ConnectTrace() : blocksConnected(1) {}
# 2437 : :
# 2438 : 68162 : void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
# 2439 : 68162 : assert(!blocksConnected.back().pindex);
# 2440 : 68162 : assert(pindex);
# 2441 : 68162 : assert(pblock);
# 2442 : 68162 : blocksConnected.back().pindex = pindex;
# 2443 : 68162 : blocksConnected.back().pblock = std::move(pblock);
# 2444 : 68162 : blocksConnected.emplace_back();
# 2445 : 68162 : }
# 2446 : :
# 2447 : 66861 : std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
# 2448 : : // We always keep one extra block at the end of our list because
# 2449 : : // blocks are added after all the conflicted transactions have
# 2450 : : // been filled in. Thus, the last entry should always be an empty
# 2451 : : // one waiting for the transactions from the next block. We pop
# 2452 : : // the last entry here to make sure the list we return is sane.
# 2453 : 66861 : assert(!blocksConnected.back().pindex);
# 2454 : 66861 : blocksConnected.pop_back();
# 2455 : 66861 : return blocksConnected;
# 2456 : 66861 : }
# 2457 : : };
# 2458 : :
# 2459 : : /**
# 2460 : : * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
# 2461 : : * corresponding to pindexNew, to bypass loading it again from disk.
# 2462 : : *
# 2463 : : * The block is added to connectTrace if connection succeeds.
# 2464 : : */
# 2465 : : bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
# 2466 : 70925 : {
# 2467 : 70925 : AssertLockHeld(cs_main);
# 2468 : 70925 : AssertLockHeld(m_mempool.cs);
# 2469 : :
# 2470 : 70925 : assert(pindexNew->pprev == m_chain.Tip());
# 2471 : : // Read block from disk.
# 2472 : 70925 : int64_t nTime1 = GetTimeMicros();
# 2473 : 70925 : std::shared_ptr<const CBlock> pthisBlock;
# 2474 [ + + ]: 70925 : if (!pblock) {
# 2475 : 7060 : std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
# 2476 [ - + ]: 7060 : if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
# 2477 : 0 : return AbortNode(state, "Failed to read block");
# 2478 : 7060 : pthisBlock = pblockNew;
# 2479 : 63865 : } else {
# 2480 : 63865 : pthisBlock = pblock;
# 2481 : 63865 : }
# 2482 : 70925 : const CBlock& blockConnecting = *pthisBlock;
# 2483 : : // Apply the block atomically to the chain state.
# 2484 : 70925 : int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
# 2485 : 70925 : int64_t nTime3;
# 2486 [ + - ]: 70925 : LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
# 2487 : 70925 : {
# 2488 : 70925 : CCoinsViewCache view(&CoinsTip());
# 2489 : 70925 : bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
# 2490 : 70925 : GetMainSignals().BlockChecked(blockConnecting, state);
# 2491 [ + + ]: 70925 : if (!rv) {
# 2492 [ + - ]: 2763 : if (state.IsInvalid())
# 2493 : 2763 : InvalidBlockFound(pindexNew, state);
# 2494 : 2763 : return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
# 2495 : 2763 : }
# 2496 : 68162 : nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
# 2497 : 68162 : assert(nBlocksTotal > 0);
# 2498 [ + - ]: 68162 : LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
# 2499 : 68162 : bool flushed = view.Flush();
# 2500 : 68162 : assert(flushed);
# 2501 : 68162 : }
# 2502 : 68162 : int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
# 2503 [ + - ]: 68162 : LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
# 2504 : : // Write the chain state to disk, if necessary.
# 2505 [ - + ]: 68162 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
# 2506 : 0 : return false;
# 2507 : 68162 : int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
# 2508 [ + - ]: 68162 : LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
# 2509 : : // Remove conflicting transactions from the mempool.;
# 2510 : 68162 : m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
# 2511 : 68162 : disconnectpool.removeForBlock(blockConnecting.vtx);
# 2512 : : // Update m_chain & related variables.
# 2513 : 68162 : m_chain.SetTip(pindexNew);
# 2514 : 68162 : UpdateTip(m_mempool, pindexNew, chainparams, *this);
# 2515 : :
# 2516 : 68162 : int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
# 2517 [ + - ]: 68162 : LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
# 2518 [ + - ]: 68162 : LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
# 2519 : :
# 2520 : 68162 : connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
# 2521 : 68162 : return true;
# 2522 : 68162 : }
# 2523 : :
# 2524 : : /**
# 2525 : : * Return the tip of the chain with the most work in it, that isn't
# 2526 : : * known to be invalid (it's however far from certain to be valid).
# 2527 : : */
# 2528 : 82516 : CBlockIndex* CChainState::FindMostWorkChain() {
# 2529 : 82520 : do {
# 2530 : 82520 : CBlockIndex *pindexNew = nullptr;
# 2531 : :
# 2532 : : // Find the best candidate header.
# 2533 : 82520 : {
# 2534 : 82520 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
# 2535 [ - + ]: 82520 : if (it == setBlockIndexCandidates.rend())
# 2536 : 0 : return nullptr;
# 2537 : 82520 : pindexNew = *it;
# 2538 : 82520 : }
# 2539 : :
# 2540 : : // Check whether all blocks on the path between the currently active chain and the candidate are valid.
# 2541 : : // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
# 2542 : 82520 : CBlockIndex *pindexTest = pindexNew;
# 2543 : 82520 : bool fInvalidAncestor = false;
# 2544 [ + + ][ + + ]: 153456 : while (pindexTest && !m_chain.Contains(pindexTest)) {
# 2545 : 70940 : assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
# 2546 : :
# 2547 : : // Pruned nodes may have entries in setBlockIndexCandidates for
# 2548 : : // which block files have been deleted. Remove those as candidates
# 2549 : : // for the most work chain if we come across them; we can't switch
# 2550 : : // to a chain unless we have all the non-active-chain parent blocks.
# 2551 : 70940 : bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
# 2552 : 70940 : bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
# 2553 [ + + ][ - + ]: 70940 : if (fFailedChain || fMissingData) {
# 2554 : : // Candidate chain is not usable (either invalid or missing data)
# 2555 [ + - ][ - + ]: 4 : if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
# [ - + ]
# 2556 : 0 : pindexBestInvalid = pindexNew;
# 2557 : 4 : CBlockIndex *pindexFailed = pindexNew;
# 2558 : : // Remove the entire chain from the set.
# 2559 [ + + ]: 8 : while (pindexTest != pindexFailed) {
# 2560 [ + - ]: 4 : if (fFailedChain) {
# 2561 : 4 : pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
# 2562 [ # # ]: 4 : } else if (fMissingData) {
# 2563 : : // If we're missing data, then add back to m_blocks_unlinked,
# 2564 : : // so that if the block arrives in the future we can try adding
# 2565 : : // to setBlockIndexCandidates again.
# 2566 : 0 : m_blockman.m_blocks_unlinked.insert(
# 2567 : 0 : std::make_pair(pindexFailed->pprev, pindexFailed));
# 2568 : 0 : }
# 2569 : 4 : setBlockIndexCandidates.erase(pindexFailed);
# 2570 : 4 : pindexFailed = pindexFailed->pprev;
# 2571 : 4 : }
# 2572 : 4 : setBlockIndexCandidates.erase(pindexTest);
# 2573 : 4 : fInvalidAncestor = true;
# 2574 : 4 : break;
# 2575 : 4 : }
# 2576 : 70936 : pindexTest = pindexTest->pprev;
# 2577 : 70936 : }
# 2578 [ + + ]: 82520 : if (!fInvalidAncestor)
# 2579 : 82516 : return pindexNew;
# 2580 : 4 : } while(true);
# 2581 : 82516 : }
# 2582 : :
# 2583 : : /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
# 2584 : 68548 : void CChainState::PruneBlockIndexCandidates() {
# 2585 : : // Note that we can't delete the current block itself, as we may need to return to it later in case a
# 2586 : : // reorganization to a better block fails.
# 2587 : 68548 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
# 2588 [ + + ][ + - ]: 193592 : while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
# [ + + ]
# 2589 : 125044 : setBlockIndexCandidates.erase(it++);
# 2590 : 125044 : }
# 2591 : : // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
# 2592 : 68548 : assert(!setBlockIndexCandidates.empty());
# 2593 : 68548 : }
# 2594 : :
# 2595 : : /**
# 2596 : : * Try to make some progress towards making pindexMostWork the active block.
# 2597 : : * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
# 2598 : : *
# 2599 : : * @returns true unless a system error occurred
# 2600 : : */
# 2601 : : bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
# 2602 : 66862 : {
# 2603 : 66862 : AssertLockHeld(cs_main);
# 2604 : 66862 : AssertLockHeld(m_mempool.cs);
# 2605 : 66862 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 2606 : :
# 2607 : 66862 : const CBlockIndex* pindexOldTip = m_chain.Tip();
# 2608 : 66862 : const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
# 2609 : :
# 2610 : : // Disconnect active blocks which are no longer in the best chain.
# 2611 : 66862 : bool fBlocksDisconnected = false;
# 2612 : 66862 : DisconnectedBlockTransactions disconnectpool;
# 2613 [ + + ][ + + ]: 71025 : while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
# 2614 [ + + ]: 4164 : if (!DisconnectTip(state, chainparams, &disconnectpool)) {
# 2615 : : // This is likely a fatal error, but keep the mempool consistent,
# 2616 : : // just in case. Only remove from the mempool in this case.
# 2617 : 1 : UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
# 2618 : :
# 2619 : : // If we're unable to disconnect a block during normal operation,
# 2620 : : // then that is a failure of our local system -- we should abort
# 2621 : : // rather than stay on a less work chain.
# 2622 : 1 : AbortNode(state, "Failed to disconnect block; see debug.log for details");
# 2623 : 1 : return false;
# 2624 : 1 : }
# 2625 : 4163 : fBlocksDisconnected = true;
# 2626 : 4163 : }
# 2627 : :
# 2628 : : // Build list of new blocks to connect (in descending height order).
# 2629 : 66862 : std::vector<CBlockIndex*> vpindexToConnect;
# 2630 : 66861 : bool fContinue = true;
# 2631 [ + + ]: 66861 : int nHeight = pindexFork ? pindexFork->nHeight : -1;
# 2632 [ + + ][ + + ]: 133799 : while (fContinue && nHeight != pindexMostWork->nHeight) {
# 2633 : : // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
# 2634 : : // a few blocks along the way.
# 2635 : 66938 : int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
# 2636 : 66938 : vpindexToConnect.clear();
# 2637 : 66938 : vpindexToConnect.reserve(nTargetHeight - nHeight);
# 2638 : 66938 : CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
# 2639 [ + + ][ + + ]: 196717 : while (pindexIter && pindexIter->nHeight != nHeight) {
# 2640 : 129779 : vpindexToConnect.push_back(pindexIter);
# 2641 : 129779 : pindexIter = pindexIter->pprev;
# 2642 : 129779 : }
# 2643 : 66938 : nHeight = nTargetHeight;
# 2644 : :
# 2645 : : // Connect new blocks.
# 2646 [ + + ]: 70925 : for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
# 2647 [ + + ][ + + ]: 70925 : if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
# 2648 [ + - ]: 2763 : if (state.IsInvalid()) {
# 2649 : : // The block violates a consensus rule.
# 2650 [ + - ]: 2763 : if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 2651 : 2763 : InvalidChainFound(vpindexToConnect.front());
# 2652 : 2763 : }
# 2653 : 2763 : state = BlockValidationState();
# 2654 : 2763 : fInvalidFound = true;
# 2655 : 2763 : fContinue = false;
# 2656 : 2763 : break;
# 2657 : 2763 : } else {
# 2658 : : // A system error occurred (disk space, database error, ...).
# 2659 : : // Make the mempool consistent with the current tip, just in case
# 2660 : : // any observers try to use it before shutdown.
# 2661 : 0 : UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
# 2662 : 0 : return false;
# 2663 : 0 : }
# 2664 : 68162 : } else {
# 2665 : 68162 : PruneBlockIndexCandidates();
# 2666 [ + + ][ + + ]: 68162 : if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
# 2667 : : // We're in a better position than we were. Return temporarily to release the lock.
# 2668 : 64003 : fContinue = false;
# 2669 : 64003 : break;
# 2670 : 64003 : }
# 2671 : 68162 : }
# 2672 : 70925 : }
# 2673 : 66938 : }
# 2674 : :
# 2675 [ + + ]: 66861 : if (fBlocksDisconnected) {
# 2676 : : // If any blocks were disconnected, disconnectpool may be non empty. Add
# 2677 : : // any disconnected transactions back to the mempool.
# 2678 : 1486 : UpdateMempoolForReorg(*this, m_mempool, disconnectpool, true);
# 2679 : 1486 : }
# 2680 : 66861 : m_mempool.check(*this);
# 2681 : :
# 2682 : 66861 : CheckForkWarningConditions();
# 2683 : :
# 2684 : 66861 : return true;
# 2685 : 66861 : }
# 2686 : :
# 2687 : : static SynchronizationState GetSynchronizationState(bool init)
# 2688 : 121139 : {
# 2689 [ + + ]: 121139 : if (!init) return SynchronizationState::POST_INIT;
# 2690 [ + + ]: 8496 : if (::fReindex) return SynchronizationState::INIT_REINDEX;
# 2691 : 6889 : return SynchronizationState::INIT_DOWNLOAD;
# 2692 : 6889 : }
# 2693 : :
# 2694 : 109587 : static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) {
# 2695 : 109587 : bool fNotify = false;
# 2696 : 109587 : bool fInitialBlockDownload = false;
# 2697 : 109587 : static CBlockIndex* pindexHeaderOld = nullptr;
# 2698 : 109587 : CBlockIndex* pindexHeader = nullptr;
# 2699 : 109587 : {
# 2700 : 109587 : LOCK(cs_main);
# 2701 : 109587 : pindexHeader = pindexBestHeader;
# 2702 : :
# 2703 [ + + ]: 109587 : if (pindexHeader != pindexHeaderOld) {
# 2704 : 57054 : fNotify = true;
# 2705 : 57054 : assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate));
# 2706 : 57054 : fInitialBlockDownload = chainstate.IsInitialBlockDownload();
# 2707 : 57054 : pindexHeaderOld = pindexHeader;
# 2708 : 57054 : }
# 2709 : 109587 : }
# 2710 : : // Send block tip changed notifications without cs_main
# 2711 [ + + ]: 109587 : if (fNotify) {
# 2712 : 57054 : uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
# 2713 : 57054 : }
# 2714 : 109587 : return fNotify;
# 2715 : 109587 : }
# 2716 : :
# 2717 : 85370 : static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
# 2718 : 85370 : AssertLockNotHeld(cs_main);
# 2719 : :
# 2720 [ + + ]: 85370 : if (GetMainSignals().CallbacksPending() > 10) {
# 2721 : 88 : SyncWithValidationInterfaceQueue();
# 2722 : 88 : }
# 2723 : 85370 : }
# 2724 : :
# 2725 : 79753 : bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
# 2726 : : // Note that while we're often called here from ProcessNewBlock, this is
# 2727 : : // far from a guarantee. Things in the P2P/RPC will often end up calling
# 2728 : : // us in the middle of ProcessNewBlock - do not assume pblock is set
# 2729 : : // sanely for performance or correctness!
# 2730 : 79753 : AssertLockNotHeld(cs_main);
# 2731 : :
# 2732 : : // ABC maintains a fair degree of expensive-to-calculate internal state
# 2733 : : // because this function periodically releases cs_main so that it does not lock up other threads for too long
# 2734 : : // during large connects - and to allow for e.g. the callback queue to drain
# 2735 : : // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
# 2736 : 79753 : LOCK(m_cs_chainstate);
# 2737 : :
# 2738 : 79753 : CBlockIndex *pindexMostWork = nullptr;
# 2739 : 79753 : CBlockIndex *pindexNewTip = nullptr;
# 2740 : 79753 : int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
# 2741 : 84738 : do {
# 2742 : : // Block until the validation queue drains. This should largely
# 2743 : : // never happen in normal operation, however may happen during
# 2744 : : // reindex, causing memory blowup if we run too far ahead.
# 2745 : : // Note that if a validationinterface callback ends up calling
# 2746 : : // ActivateBestChain this may lead to a deadlock! We should
# 2747 : : // probably have a DEBUG_LOCKORDER test for this in the future.
# 2748 : 84738 : LimitValidationInterfaceQueue();
# 2749 : :
# 2750 : 84738 : {
# 2751 : 84738 : LOCK(cs_main);
# 2752 : 84738 : LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
# 2753 : 84738 : CBlockIndex* starting_tip = m_chain.Tip();
# 2754 : 84738 : bool blocks_connected = false;
# 2755 : 84829 : do {
# 2756 : : // We absolutely may not unlock cs_main until we've made forward progress
# 2757 : : // (with the exception of shutdown due to hardware issues, low disk space, etc).
# 2758 : 84829 : ConnectTrace connectTrace; // Destructed before cs_main is unlocked
# 2759 : :
# 2760 [ + + ]: 84829 : if (pindexMostWork == nullptr) {
# 2761 : 82516 : pindexMostWork = FindMostWorkChain();
# 2762 : 82516 : }
# 2763 : :
# 2764 : : // Whether we have anything to do at all.
# 2765 [ - + ][ + + ]: 84829 : if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
# 2766 : 17967 : break;
# 2767 : 17967 : }
# 2768 : :
# 2769 : 66862 : bool fInvalidFound = false;
# 2770 : 66862 : std::shared_ptr<const CBlock> nullBlockPtr;
# 2771 [ + + ][ + + ]: 66862 : if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
# [ + + ]
# 2772 : : // A system error occurred
# 2773 : 1 : return false;
# 2774 : 1 : }
# 2775 : 66861 : blocks_connected = true;
# 2776 : :
# 2777 [ + + ]: 66861 : if (fInvalidFound) {
# 2778 : : // Wipe cache, we may need another branch now.
# 2779 : 2763 : pindexMostWork = nullptr;
# 2780 : 2763 : }
# 2781 : 66861 : pindexNewTip = m_chain.Tip();
# 2782 : :
# 2783 [ + + ]: 68162 : for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
# 2784 : 68162 : assert(trace.pblock && trace.pindex);
# 2785 : 68162 : GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
# 2786 : 68162 : }
# 2787 [ - + ][ + + ]: 66861 : } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
# [ + + ][ + + ]
# 2788 [ + + ]: 84738 : if (!blocks_connected) return true;
# 2789 : :
# 2790 : 66770 : const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
# 2791 : 66770 : bool fInitialDownload = IsInitialBlockDownload();
# 2792 : :
# 2793 : : // Notify external listeners about the new tip.
# 2794 : : // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
# 2795 [ + + ]: 66770 : if (pindexFork != pindexNewTip) {
# 2796 : : // Notify ValidationInterface subscribers
# 2797 : 64007 : GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
# 2798 : :
# 2799 : : // Always notify the UI if a new block tip was connected
# 2800 : 64007 : uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
# 2801 : 64007 : }
# 2802 : 66770 : }
# 2803 : : // When we reach this point, we switched to a new tip (stored in pindexNewTip).
# 2804 : :
# 2805 [ + + ][ + - ]: 66770 : if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
# [ + + ]
# 2806 : :
# 2807 : : // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
# 2808 : : // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
# 2809 : : // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
# 2810 : : // that the best block hash is non-null.
# 2811 [ + + ]: 66770 : if (ShutdownRequested()) break;
# 2812 [ + + ]: 66766 : } while (pindexNewTip != pindexMostWork);
# 2813 : 79753 : CheckBlockIndex(chainparams.GetConsensus());
# 2814 : :
# 2815 : : // Write changes periodically to disk, after relay.
# 2816 [ - + ]: 61785 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
# 2817 : 0 : return false;
# 2818 : 0 : }
# 2819 : :
# 2820 : 61785 : return true;
# 2821 : 61785 : }
# 2822 : :
# 2823 : : bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex)
# 2824 : 9 : {
# 2825 : 9 : {
# 2826 : 9 : LOCK(cs_main);
# 2827 [ + + ]: 9 : if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
# 2828 : : // Nothing to do, this block is not at the tip.
# 2829 : 1 : return true;
# 2830 : 1 : }
# 2831 [ + + ]: 8 : if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
# 2832 : : // The chain has been extended since the last call, reset the counter.
# 2833 : 4 : nBlockReverseSequenceId = -1;
# 2834 : 4 : }
# 2835 : 8 : nLastPreciousChainwork = m_chain.Tip()->nChainWork;
# 2836 : 8 : setBlockIndexCandidates.erase(pindex);
# 2837 : 8 : pindex->nSequenceId = nBlockReverseSequenceId;
# 2838 [ + - ]: 8 : if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
# 2839 : : // We can't keep reducing the counter if somebody really wants to
# 2840 : : // call preciousblock 2**31-1 times on the same set of tips...
# 2841 : 8 : nBlockReverseSequenceId--;
# 2842 : 8 : }
# 2843 [ + - ][ + - ]: 8 : if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
# 2844 : 8 : setBlockIndexCandidates.insert(pindex);
# 2845 : 8 : PruneBlockIndexCandidates();
# 2846 : 8 : }
# 2847 : 8 : }
# 2848 : :
# 2849 : 8 : return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
# 2850 : 8 : }
# 2851 : :
# 2852 : : bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
# 2853 : 79 : {
# 2854 : : // Genesis block can't be invalidated
# 2855 : 79 : assert(pindex);
# 2856 [ - + ]: 79 : if (pindex->nHeight == 0) return false;
# 2857 : :
# 2858 : 79 : CBlockIndex* to_mark_failed = pindex;
# 2859 : 79 : bool pindex_was_in_chain = false;
# 2860 : 79 : int disconnected = 0;
# 2861 : :
# 2862 : : // We do not allow ActivateBestChain() to run while InvalidateBlock() is
# 2863 : : // running, as that could cause the tip to change while we disconnect
# 2864 : : // blocks.
# 2865 : 79 : LOCK(m_cs_chainstate);
# 2866 : :
# 2867 : : // We'll be acquiring and releasing cs_main below, to allow the validation
# 2868 : : // callbacks to run. However, we should keep the block index in a
# 2869 : : // consistent state as we disconnect blocks -- in particular we need to
# 2870 : : // add equal-work blocks to setBlockIndexCandidates as we disconnect.
# 2871 : : // To avoid walking the block index repeatedly in search of candidates,
# 2872 : : // build a map once so that we can look up candidate blocks by chain
# 2873 : : // work as we go.
# 2874 : 79 : std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
# 2875 : :
# 2876 : 79 : {
# 2877 : 79 : LOCK(cs_main);
# 2878 [ + + ]: 16171 : for (const auto& entry : m_blockman.m_block_index) {
# 2879 : 16171 : CBlockIndex *candidate = entry.second;
# 2880 : : // We don't need to put anything in our active chain into the
# 2881 : : // multimap, because those candidates will be found and considered
# 2882 : : // as we disconnect.
# 2883 : : // Instead, consider only non-active-chain blocks that have at
# 2884 : : // least as much work as where we expect the new tip to end up.
# 2885 [ + + ][ + + ]: 16171 : if (!m_chain.Contains(candidate) &&
# 2886 [ + + ]: 16171 : !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
# 2887 [ + + ]: 16171 : candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
# 2888 [ + - ]: 16171 : candidate->HaveTxsDownloaded()) {
# 2889 : 5 : candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
# 2890 : 5 : }
# 2891 : 16171 : }
# 2892 : 79 : }
# 2893 : :
# 2894 : : // Disconnect (descendants of) pindex, and mark them invalid.
# 2895 : 632 : while (true) {
# 2896 [ - + ]: 632 : if (ShutdownRequested()) break;
# 2897 : :
# 2898 : : // Make sure the queue of validation callbacks doesn't grow unboundedly.
# 2899 : 632 : LimitValidationInterfaceQueue();
# 2900 : :
# 2901 : 632 : LOCK(cs_main);
# 2902 : 632 : LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
# 2903 [ + + ]: 632 : if (!m_chain.Contains(pindex)) break;
# 2904 : 553 : pindex_was_in_chain = true;
# 2905 : 553 : CBlockIndex *invalid_walk_tip = m_chain.Tip();
# 2906 : :
# 2907 : : // ActivateBestChain considers blocks already in m_chain
# 2908 : : // unconditionally valid already, so force disconnect away from it.
# 2909 : 553 : DisconnectedBlockTransactions disconnectpool;
# 2910 : 553 : bool ret = DisconnectTip(state, chainparams, &disconnectpool);
# 2911 : : // DisconnectTip will add transactions to disconnectpool.
# 2912 : : // Adjust the mempool to be consistent with the new tip, adding
# 2913 : : // transactions back to the mempool if disconnecting was successful,
# 2914 : : // and we're not doing a very deep invalidation (in which case
# 2915 : : // keeping the mempool up to date is probably futile anyway).
# 2916 : 553 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 2917 [ + + ][ + - ]: 553 : UpdateMempoolForReorg(*this, m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
# 2918 [ - + ]: 553 : if (!ret) return false;
# 2919 : 553 : assert(invalid_walk_tip->pprev == m_chain.Tip());
# 2920 : :
# 2921 : : // We immediately mark the disconnected blocks as invalid.
# 2922 : : // This prevents a case where pruned nodes may fail to invalidateblock
# 2923 : : // and be left unable to start as they have no tip candidates (as there
# 2924 : : // are no blocks that meet the "have data and are not invalid per
# 2925 : : // nStatus" criteria for inclusion in setBlockIndexCandidates).
# 2926 : 553 : invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
# 2927 : 553 : setDirtyBlockIndex.insert(invalid_walk_tip);
# 2928 : 553 : setBlockIndexCandidates.erase(invalid_walk_tip);
# 2929 : 553 : setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
# 2930 [ + + ][ - + ]: 553 : if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
# 2931 : : // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
# 2932 : : // need to be BLOCK_FAILED_CHILD instead.
# 2933 : 0 : to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
# 2934 : 0 : setDirtyBlockIndex.insert(to_mark_failed);
# 2935 : 0 : }
# 2936 : :
# 2937 : : // Add any equal or more work headers to setBlockIndexCandidates
# 2938 : 553 : auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
# 2939 [ + + ]: 558 : while (candidate_it != candidate_blocks_by_work.end()) {
# 2940 [ + - ]: 5 : if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
# 2941 : 5 : setBlockIndexCandidates.insert(candidate_it->second);
# 2942 : 5 : candidate_it = candidate_blocks_by_work.erase(candidate_it);
# 2943 : 5 : } else {
# 2944 : 0 : ++candidate_it;
# 2945 : 0 : }
# 2946 : 5 : }
# 2947 : :
# 2948 : : // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
# 2949 : : // iterations, or, if it's the last one, call InvalidChainFound on it.
# 2950 : 553 : to_mark_failed = invalid_walk_tip;
# 2951 : 553 : }
# 2952 : :
# 2953 : 79 : CheckBlockIndex(chainparams.GetConsensus());
# 2954 : :
# 2955 : 79 : {
# 2956 : 79 : LOCK(cs_main);
# 2957 [ - + ]: 79 : if (m_chain.Contains(to_mark_failed)) {
# 2958 : : // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
# 2959 : 0 : return false;
# 2960 : 0 : }
# 2961 : :
# 2962 : : // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
# 2963 : 79 : to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
# 2964 : 79 : setDirtyBlockIndex.insert(to_mark_failed);
# 2965 : 79 : setBlockIndexCandidates.erase(to_mark_failed);
# 2966 : 79 : m_blockman.m_failed_blocks.insert(to_mark_failed);
# 2967 : :
# 2968 : : // If any new blocks somehow arrived while we were disconnecting
# 2969 : : // (above), then the pre-calculation of what should go into
# 2970 : : // setBlockIndexCandidates may have missed entries. This would
# 2971 : : // technically be an inconsistency in the block index, but if we clean
# 2972 : : // it up here, this should be an essentially unobservable error.
# 2973 : : // Loop back over all block index entries and add any missing entries
# 2974 : : // to setBlockIndexCandidates.
# 2975 : 79 : BlockMap::iterator it = m_blockman.m_block_index.begin();
# 2976 [ + + ]: 16250 : while (it != m_blockman.m_block_index.end()) {
# 2977 [ + + ][ + + ]: 16171 : if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
# [ + - ][ + + ]
# 2978 : 84 : setBlockIndexCandidates.insert(it->second);
# 2979 : 84 : }
# 2980 : 16171 : it++;
# 2981 : 16171 : }
# 2982 : :
# 2983 : 79 : InvalidChainFound(to_mark_failed);
# 2984 : 79 : }
# 2985 : :
# 2986 : : // Only notify about a new block tip if the active chain was modified.
# 2987 [ + + ]: 79 : if (pindex_was_in_chain) {
# 2988 : 78 : uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
# 2989 : 78 : }
# 2990 : 79 : return true;
# 2991 : 79 : }
# 2992 : :
# 2993 : 9 : void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
# 2994 : 9 : AssertLockHeld(cs_main);
# 2995 : :
# 2996 : 9 : int nHeight = pindex->nHeight;
# 2997 : :
# 2998 : : // Remove the invalidity flag from this block and all its descendants.
# 2999 : 9 : BlockMap::iterator it = m_blockman.m_block_index.begin();
# 3000 [ + + ]: 1210 : while (it != m_blockman.m_block_index.end()) {
# 3001 [ + + ][ + + ]: 1201 : if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
# 3002 : 229 : it->second->nStatus &= ~BLOCK_FAILED_MASK;
# 3003 : 229 : setDirtyBlockIndex.insert(it->second);
# 3004 [ + - ][ + - ]: 229 : if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
# [ + - ][ + - ]
# 3005 : 229 : setBlockIndexCandidates.insert(it->second);
# 3006 : 229 : }
# 3007 [ + + ]: 229 : if (it->second == pindexBestInvalid) {
# 3008 : : // Reset invalid block marker if it was pointing to one of those.
# 3009 : 8 : pindexBestInvalid = nullptr;
# 3010 : 8 : }
# 3011 : 229 : m_blockman.m_failed_blocks.erase(it->second);
# 3012 : 229 : }
# 3013 : 1201 : it++;
# 3014 : 1201 : }
# 3015 : :
# 3016 : : // Remove the invalidity flag from all ancestors too.
# 3017 [ + + ]: 975 : while (pindex != nullptr) {
# 3018 [ + + ]: 966 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
# 3019 : 1 : pindex->nStatus &= ~BLOCK_FAILED_MASK;
# 3020 : 1 : setDirtyBlockIndex.insert(pindex);
# 3021 : 1 : m_blockman.m_failed_blocks.erase(pindex);
# 3022 : 1 : }
# 3023 : 966 : pindex = pindex->pprev;
# 3024 : 966 : }
# 3025 : 9 : }
# 3026 : :
# 3027 : : CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
# 3028 : 72312 : {
# 3029 : 72312 : AssertLockHeld(cs_main);
# 3030 : :
# 3031 : : // Check for duplicate
# 3032 : 72312 : uint256 hash = block.GetHash();
# 3033 : 72312 : BlockMap::iterator it = m_block_index.find(hash);
# 3034 [ + + ]: 72312 : if (it != m_block_index.end())
# 3035 : 3 : return it->second;
# 3036 : :
# 3037 : : // Construct new block index object
# 3038 : 72309 : CBlockIndex* pindexNew = new CBlockIndex(block);
# 3039 : : // We assign the sequence id to blocks only when the full data is available,
# 3040 : : // to avoid miners withholding blocks but broadcasting headers, to get a
# 3041 : : // competitive advantage.
# 3042 : 72309 : pindexNew->nSequenceId = 0;
# 3043 : 72309 : BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
# 3044 : 72309 : pindexNew->phashBlock = &((*mi).first);
# 3045 : 72309 : BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
# 3046 [ + + ]: 72309 : if (miPrev != m_block_index.end())
# 3047 : 71906 : {
# 3048 : 71906 : pindexNew->pprev = (*miPrev).second;
# 3049 : 71906 : pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
# 3050 : 71906 : pindexNew->BuildSkip();
# 3051 : 71906 : }
# 3052 [ + + ]: 72309 : pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
# 3053 [ + + ]: 72309 : pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
# 3054 : 72309 : pindexNew->RaiseValidity(BLOCK_VALID_TREE);
# 3055 [ + + ][ + + ]: 72309 : if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
# 3056 : 69260 : pindexBestHeader = pindexNew;
# 3057 : :
# 3058 : 72309 : setDirtyBlockIndex.insert(pindexNew);
# 3059 : :
# 3060 : 72309 : return pindexNew;
# 3061 : 72309 : }
# 3062 : :
# 3063 : : /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
# 3064 : : void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
# 3065 : 69448 : {
# 3066 : 69448 : pindexNew->nTx = block.vtx.size();
# 3067 : 69448 : pindexNew->nChainTx = 0;
# 3068 : 69448 : pindexNew->nFile = pos.nFile;
# 3069 : 69448 : pindexNew->nDataPos = pos.nPos;
# 3070 : 69448 : pindexNew->nUndoPos = 0;
# 3071 : 69448 : pindexNew->nStatus |= BLOCK_HAVE_DATA;
# 3072 [ + + ]: 69448 : if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
# 3073 : 66772 : pindexNew->nStatus |= BLOCK_OPT_WITNESS;
# 3074 : 66772 : }
# 3075 : 69448 : pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
# 3076 : 69448 : setDirtyBlockIndex.insert(pindexNew);
# 3077 : :
# 3078 [ + + ][ + + ]: 69448 : if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
# 3079 : : // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
# 3080 : 68853 : std::deque<CBlockIndex*> queue;
# 3081 : 68853 : queue.push_back(pindexNew);
# 3082 : :
# 3083 : : // Recursively process any descendant blocks that now may be eligible to be connected.
# 3084 [ + + ]: 138298 : while (!queue.empty()) {
# 3085 : 69445 : CBlockIndex *pindex = queue.front();
# 3086 : 69445 : queue.pop_front();
# 3087 [ + + ]: 69445 : pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
# 3088 : 69445 : {
# 3089 : 69445 : LOCK(cs_nBlockSequenceId);
# 3090 : 69445 : pindex->nSequenceId = nBlockSequenceId++;
# 3091 : 69445 : }
# 3092 [ + + ][ + + ]: 69445 : if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
# [ + + ]
# 3093 : 66552 : setBlockIndexCandidates.insert(pindex);
# 3094 : 66552 : }
# 3095 : 69445 : std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
# 3096 [ + + ]: 70037 : while (range.first != range.second) {
# 3097 : 592 : std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
# 3098 : 592 : queue.push_back(it->second);
# 3099 : 592 : range.first++;
# 3100 : 592 : m_blockman.m_blocks_unlinked.erase(it);
# 3101 : 592 : }
# 3102 : 69445 : }
# 3103 : 68853 : } else {
# 3104 [ + - ][ + - ]: 595 : if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
# 3105 : 595 : m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
# 3106 : 595 : }
# 3107 : 595 : }
# 3108 : 69448 : }
# 3109 : :
# 3110 : : static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
# 3111 : 212079 : {
# 3112 : : // Check proof of work matches claimed amount
# 3113 [ + + ][ + + ]: 212079 : if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
# 3114 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
# 3115 : :
# 3116 : 212078 : return true;
# 3117 : 212078 : }
# 3118 : :
# 3119 : : bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
# 3120 : 298904 : {
# 3121 : : // These are checks that are independent of context.
# 3122 : :
# 3123 [ + + ]: 298904 : if (block.fChecked)
# 3124 : 158752 : return true;
# 3125 : :
# 3126 : : // Check that the header is valid (particularly PoW). This is mostly
# 3127 : : // redundant with the call in AcceptBlockHeader.
# 3128 [ + + ]: 140152 : if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
# 3129 : 1 : return false;
# 3130 : :
# 3131 : : // Signet only: check block solution
# 3132 [ + + ][ + + ]: 140151 : if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
# [ + + ]
# 3133 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
# 3134 : 1 : }
# 3135 : :
# 3136 : : // Check the merkle root.
# 3137 [ + + ]: 140150 : if (fCheckMerkleRoot) {
# 3138 : 80210 : bool mutated;
# 3139 : 80210 : uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
# 3140 [ + + ]: 80210 : if (block.hashMerkleRoot != hashMerkleRoot2)
# 3141 : 13 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
# 3142 : :
# 3143 : : // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
# 3144 : : // of transactions in a block without affecting the merkle root of a block,
# 3145 : : // while still invalidating it.
# 3146 [ + + ]: 80197 : if (mutated)
# 3147 : 128 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
# 3148 : 140009 : }
# 3149 : :
# 3150 : : // All potential-corruption validation must be done before we do any
# 3151 : : // transaction validation, as otherwise we may mark the header as invalid
# 3152 : : // because we receive the wrong transactions for it.
# 3153 : : // Note that witness malleability is checked in ContextualCheckBlock, so no
# 3154 : : // checks that use witness data may be performed here.
# 3155 : :
# 3156 : : // Size limits
# 3157 [ + + ][ - + ]: 140009 : if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
# [ + + ]
# 3158 : 2 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
# 3159 : :
# 3160 : : // First transaction must be coinbase, the rest must not be
# 3161 [ - + ][ + + ]: 140007 : if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
# 3162 : 2 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
# 3163 [ + + ]: 225793 : for (unsigned int i = 1; i < block.vtx.size(); i++)
# 3164 [ + + ]: 85791 : if (block.vtx[i]->IsCoinBase())
# 3165 : 3 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
# 3166 : :
# 3167 : : // Check transactions
# 3168 : : // Must check for duplicate inputs (see CVE-2018-17144)
# 3169 [ + + ]: 225788 : for (const auto& tx : block.vtx) {
# 3170 : 225788 : TxValidationState tx_state;
# 3171 [ + + ]: 225788 : if (!CheckTransaction(*tx, tx_state)) {
# 3172 : : // CheckBlock() does context-free validation checks. The only
# 3173 : : // possible failures are consensus failures.
# 3174 : 257 : assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
# 3175 : 257 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
# 3176 : 257 : strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
# 3177 : 257 : }
# 3178 : 225788 : }
# 3179 : 140002 : unsigned int nSigOps = 0;
# 3180 [ + + ]: 139745 : for (const auto& tx : block.vtx)
# 3181 : 225150 : {
# 3182 : 225150 : nSigOps += GetLegacySigOpCount(*tx);
# 3183 : 225150 : }
# 3184 [ + + ]: 139745 : if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
# 3185 : 9 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
# 3186 : :
# 3187 [ + + ][ + - ]: 139736 : if (fCheckPOW && fCheckMerkleRoot)
# 3188 : 79797 : block.fChecked = true;
# 3189 : :
# 3190 : 139736 : return true;
# 3191 : 139736 : }
# 3192 : :
# 3193 : : bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
# 3194 : 362254 : {
# 3195 [ + + ]: 362254 : int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
# 3196 : 362254 : return (height >= params.SegwitHeight);
# 3197 : 362254 : }
# 3198 : :
# 3199 : : void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
# 3200 : 38895 : {
# 3201 : 38895 : int commitpos = GetWitnessCommitmentIndex(block);
# 3202 : 38895 : static const std::vector<unsigned char> nonce(32, 0x00);
# 3203 [ + + ][ + + ]: 38895 : if (commitpos != NO_WITNESS_COMMITMENT && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
# [ + + ]
# 3204 : 29994 : CMutableTransaction tx(*block.vtx[0]);
# 3205 : 29994 : tx.vin[0].scriptWitness.stack.resize(1);
# 3206 : 29994 : tx.vin[0].scriptWitness.stack[0] = nonce;
# 3207 : 29994 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3208 : 29994 : }
# 3209 : 38895 : }
# 3210 : :
# 3211 : : std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
# 3212 : 34789 : {
# 3213 : 34789 : std::vector<unsigned char> commitment;
# 3214 : 34789 : int commitpos = GetWitnessCommitmentIndex(block);
# 3215 : 34789 : std::vector<unsigned char> ret(32, 0x00);
# 3216 [ + + ]: 34789 : if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
# 3217 [ + - ]: 34788 : if (commitpos == NO_WITNESS_COMMITMENT) {
# 3218 : 34788 : uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
# 3219 : 34788 : CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
# 3220 : 34788 : CTxOut out;
# 3221 : 34788 : out.nValue = 0;
# 3222 : 34788 : out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
# 3223 : 34788 : out.scriptPubKey[0] = OP_RETURN;
# 3224 : 34788 : out.scriptPubKey[1] = 0x24;
# 3225 : 34788 : out.scriptPubKey[2] = 0xaa;
# 3226 : 34788 : out.scriptPubKey[3] = 0x21;
# 3227 : 34788 : out.scriptPubKey[4] = 0xa9;
# 3228 : 34788 : out.scriptPubKey[5] = 0xed;
# 3229 : 34788 : memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
# 3230 : 34788 : commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
# 3231 : 34788 : CMutableTransaction tx(*block.vtx[0]);
# 3232 : 34788 : tx.vout.push_back(out);
# 3233 : 34788 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3234 : 34788 : }
# 3235 : 34788 : }
# 3236 : 34789 : UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
# 3237 : 34789 : return commitment;
# 3238 : 34789 : }
# 3239 : :
# 3240 : : CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
# 3241 : 101638 : {
# 3242 : 101638 : const MapCheckpoints& checkpoints = data.mapCheckpoints;
# 3243 : :
# 3244 [ + + ]: 101638 : for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
# 3245 : 101625 : {
# 3246 : 101625 : const uint256& hash = i.second;
# 3247 : 101625 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
# 3248 : 101625 : CBlockIndex* pindex = LookupBlockIndex(hash);
# 3249 [ + + ]: 101625 : if (pindex) {
# 3250 : 101077 : return pindex;
# 3251 : 101077 : }
# 3252 : 101625 : }
# 3253 : 101638 : return nullptr;
# 3254 : 101638 : }
# 3255 : :
# 3256 : : /** Context-dependent validity checks.
# 3257 : : * By "context", we mean only the previous block headers, but not the UTXO
# 3258 : : * set; UTXO-related validity checks are done in ConnectBlock().
# 3259 : : * NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3260 : : * should consider upgrade issues if we change which consensus rules are
# 3261 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3262 : : * in ConnectBlock().
# 3263 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3264 : : */
# 3265 : : static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
# 3266 : 101900 : {
# 3267 : 101900 : assert(pindexPrev != nullptr);
# 3268 : 101900 : const int nHeight = pindexPrev->nHeight + 1;
# 3269 : :
# 3270 : : // Check proof of work
# 3271 : 101900 : const Consensus::Params& consensusParams = params.GetConsensus();
# 3272 [ + + ]: 101900 : if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
# 3273 : 2 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
# 3274 : :
# 3275 : : // Check against checkpoints
# 3276 [ + + ]: 101898 : if (fCheckpointsEnabled) {
# 3277 : : // Don't accept any forks from the main chain prior to last checkpoint.
# 3278 : : // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
# 3279 : : // BlockIndex().
# 3280 : 101638 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(blockman));
# 3281 : 101638 : CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints());
# 3282 [ + + ][ + + ]: 101638 : if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
# 3283 : 1 : LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
# 3284 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
# 3285 : 1 : }
# 3286 : 101897 : }
# 3287 : :
# 3288 : : // Check timestamp against prev
# 3289 [ + + ]: 101897 : if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
# 3290 : 6 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
# 3291 : :
# 3292 : : // Check timestamp
# 3293 [ + + ]: 101891 : if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
# 3294 : 4 : return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
# 3295 : :
# 3296 : : // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
# 3297 : : // check for version 2, 3 and 4 upgrades
# 3298 [ + + ][ + + ]: 101887 : if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
# 3299 [ + + ][ + + ]: 101887 : (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
# 3300 [ + + ][ + + ]: 101887 : (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
# 3301 : 3 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
# 3302 : 3 : strprintf("rejected nVersion=0x%08x block", block.nVersion));
# 3303 : :
# 3304 : 101884 : return true;
# 3305 : 101884 : }
# 3306 : :
# 3307 : : /** NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3308 : : * should consider upgrade issues if we change which consensus rules are
# 3309 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3310 : : * in ConnectBlock().
# 3311 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3312 : : */
# 3313 : : static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
# 3314 : 99185 : {
# 3315 [ + + ]: 99185 : const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
# 3316 : :
# 3317 : : // Start enforcing BIP113 (Median Time Past).
# 3318 : 99185 : int nLockTimeFlags = 0;
# 3319 [ + + ]: 99185 : if (nHeight >= consensusParams.CSVHeight) {
# 3320 : 30810 : assert(pindexPrev != nullptr);
# 3321 : 30810 : nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
# 3322 : 30810 : }
# 3323 : :
# 3324 [ + + ]: 99185 : int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
# 3325 : 99185 : ? pindexPrev->GetMedianTimePast()
# 3326 : 99185 : : block.GetBlockTime();
# 3327 : :
# 3328 : : // Check that all transactions are finalized
# 3329 [ + + ]: 166364 : for (const auto& tx : block.vtx) {
# 3330 [ + + ]: 166364 : if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
# 3331 : 7 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
# 3332 : 7 : }
# 3333 : 166364 : }
# 3334 : :
# 3335 : : // Enforce rule that the coinbase starts with serialized block height
# 3336 [ + + ]: 99185 : if (nHeight >= consensusParams.BIP34Height)
# 3337 : 28394 : {
# 3338 : 28394 : CScript expect = CScript() << nHeight;
# 3339 [ + + ]: 28394 : if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
# 3340 [ - + ]: 28394 : !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
# 3341 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
# 3342 : 1 : }
# 3343 : 99177 : }
# 3344 : :
# 3345 : : // Validation for witness commitments.
# 3346 : : // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
# 3347 : : // coinbase (where 0x0000....0000 is used instead).
# 3348 : : // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
# 3349 : : // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
# 3350 : : // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
# 3351 : : // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
# 3352 : : // multiple, the last one is used.
# 3353 : 99177 : bool fHaveWitness = false;
# 3354 [ + + ]: 99177 : if (nHeight >= consensusParams.SegwitHeight) {
# 3355 : 95828 : int commitpos = GetWitnessCommitmentIndex(block);
# 3356 [ + + ]: 95828 : if (commitpos != NO_WITNESS_COMMITMENT) {
# 3357 : 89387 : bool malleated = false;
# 3358 : 89387 : uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
# 3359 : : // The malleation check is ignored; as the transaction tree itself
# 3360 : : // already does not permit it, it is impossible to trigger in the
# 3361 : : // witness tree.
# 3362 [ + + ][ - + ]: 89387 : if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
# 3363 : 145 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
# 3364 : 145 : }
# 3365 : 89242 : CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
# 3366 [ + + ]: 89242 : if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
# 3367 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
# 3368 : 3 : }
# 3369 : 89239 : fHaveWitness = true;
# 3370 : 89239 : }
# 3371 : 95828 : }
# 3372 : :
# 3373 : : // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
# 3374 [ + + ]: 99177 : if (!fHaveWitness) {
# 3375 [ + + ]: 33071 : for (const auto& tx : block.vtx) {
# 3376 [ + + ]: 33071 : if (tx->HasWitness()) {
# 3377 : 5 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
# 3378 : 5 : }
# 3379 : 33071 : }
# 3380 : 9790 : }
# 3381 : :
# 3382 : : // After the coinbase witness reserved value and commitment are verified,
# 3383 : : // we can check if the block weight passes (before we've checked the
# 3384 : : // coinbase witness, it would be possible for the weight to be too
# 3385 : : // large by filling up the coinbase witness, which doesn't change
# 3386 : : // the block hash, so we couldn't mark the block as permanently
# 3387 : : // failed).
# 3388 [ + + ]: 99029 : if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
# 3389 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
# 3390 : 1 : }
# 3391 : :
# 3392 : 99023 : return true;
# 3393 : 99023 : }
# 3394 : :
# 3395 : : bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
# 3396 : 126374 : {
# 3397 : 126374 : AssertLockHeld(cs_main);
# 3398 : : // Check for duplicate
# 3399 : 126374 : uint256 hash = block.GetHash();
# 3400 : 126374 : BlockMap::iterator miSelf = m_block_index.find(hash);
# 3401 [ + + ]: 126374 : if (hash != chainparams.GetConsensus().hashGenesisBlock) {
# 3402 [ + + ]: 126362 : if (miSelf != m_block_index.end()) {
# 3403 : : // Block header is already known.
# 3404 : 54435 : CBlockIndex* pindex = miSelf->second;
# 3405 [ + - ]: 54435 : if (ppindex)
# 3406 : 54435 : *ppindex = pindex;
# 3407 [ + + ]: 54435 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
# 3408 : 524 : LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
# 3409 : 524 : return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
# 3410 : 524 : }
# 3411 : 53911 : return true;
# 3412 : 53911 : }
# 3413 : :
# 3414 [ - + ]: 71927 : if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
# 3415 [ # # ]: 0 : LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
# 3416 : 0 : return false;
# 3417 : 0 : }
# 3418 : :
# 3419 : : // Get prev block index
# 3420 : 71927 : CBlockIndex* pindexPrev = nullptr;
# 3421 : 71927 : BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
# 3422 [ + + ]: 71927 : if (mi == m_block_index.end()) {
# 3423 : 3 : LogPrintf("ERROR: %s: prev block not found\n", __func__);
# 3424 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
# 3425 : 3 : }
# 3426 : 71924 : pindexPrev = (*mi).second;
# 3427 [ + + ]: 71924 : if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
# 3428 : 4 : LogPrintf("ERROR: %s: prev block invalid\n", __func__);
# 3429 : 4 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3430 : 4 : }
# 3431 [ + + ]: 71920 : if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime()))
# 3432 : 13 : return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
# 3433 : :
# 3434 : : /* Determine if this block descends from any block which has been found
# 3435 : : * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
# 3436 : : * them as failed. For example:
# 3437 : : *
# 3438 : : * D3
# 3439 : : * /
# 3440 : : * B2 - C2
# 3441 : : * / \
# 3442 : : * A D2 - E2 - F2
# 3443 : : * \
# 3444 : : * B1 - C1 - D1 - E1
# 3445 : : *
# 3446 : : * In the case that we attempted to reorg from E1 to F2, only to find
# 3447 : : * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
# 3448 : : * but NOT D3 (it was not in any of our candidate sets at the time).
# 3449 : : *
# 3450 : : * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
# 3451 : : * in LoadBlockIndex.
# 3452 : : */
# 3453 [ + + ]: 71907 : if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
# 3454 : : // The above does not mean "invalid": it checks if the previous block
# 3455 : : // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
# 3456 : : // optimization, in the common case of adding a new block to the tip,
# 3457 : : // we don't need to iterate over the failed blocks list.
# 3458 [ + + ]: 117313 : for (const CBlockIndex* failedit : m_failed_blocks) {
# 3459 [ + + ]: 117313 : if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
# 3460 : 1 : assert(failedit->nStatus & BLOCK_FAILED_VALID);
# 3461 : 1 : CBlockIndex* invalid_walk = pindexPrev;
# 3462 [ + + ]: 2 : while (invalid_walk != failedit) {
# 3463 : 1 : invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
# 3464 : 1 : setDirtyBlockIndex.insert(invalid_walk);
# 3465 : 1 : invalid_walk = invalid_walk->pprev;
# 3466 : 1 : }
# 3467 : 1 : LogPrintf("ERROR: %s: prev block invalid\n", __func__);
# 3468 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3469 : 1 : }
# 3470 : 117313 : }
# 3471 : 20992 : }
# 3472 : 71907 : }
# 3473 : 126374 : CBlockIndex* pindex = AddToBlockIndex(block);
# 3474 : :
# 3475 [ + - ]: 71918 : if (ppindex)
# 3476 : 71918 : *ppindex = pindex;
# 3477 : :
# 3478 : 71918 : return true;
# 3479 : 126374 : }
# 3480 : :
# 3481 : : // Exposed wrapper for AcceptBlockHeader
# 3482 : : bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
# 3483 : 29029 : {
# 3484 : 29029 : assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate()));
# 3485 : 29029 : AssertLockNotHeld(cs_main);
# 3486 : 29029 : {
# 3487 : 29029 : LOCK(cs_main);
# 3488 [ + + ]: 45254 : for (const CBlockHeader& header : headers) {
# 3489 : 45254 : CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
# 3490 : 45254 : bool accepted = m_blockman.AcceptBlockHeader(
# 3491 : 45254 : header, state, chainparams, &pindex);
# 3492 : 45254 : ActiveChainstate().CheckBlockIndex(chainparams.GetConsensus());
# 3493 : :
# 3494 [ + + ]: 45254 : if (!accepted) {
# 3495 : 28 : return false;
# 3496 : 28 : }
# 3497 [ + + ]: 45226 : if (ppindex) {
# 3498 : 45136 : *ppindex = pindex;
# 3499 : 45136 : }
# 3500 : 45226 : }
# 3501 : 29029 : }
# 3502 [ + + ]: 29029 : if (NotifyHeaderTip(ActiveChainstate())) {
# 3503 [ + + ][ + + ]: 21081 : if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
# [ + - ]
# 3504 : 407 : LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
# 3505 : 407 : }
# 3506 : 21081 : }
# 3507 : 29001 : return true;
# 3508 : 29029 : }
# 3509 : :
# 3510 : : /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
# 3511 : : bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
# 3512 : 81120 : {
# 3513 : 81120 : const CBlock& block = *pblock;
# 3514 : :
# 3515 [ + + ]: 81120 : if (fNewBlock) *fNewBlock = false;
# 3516 : 81120 : AssertLockHeld(cs_main);
# 3517 : :
# 3518 : 81120 : CBlockIndex *pindexDummy = nullptr;
# 3519 [ + + ]: 81120 : CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
# 3520 : :
# 3521 : 81120 : bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
# 3522 : 81120 : CheckBlockIndex(chainparams.GetConsensus());
# 3523 : :
# 3524 [ + + ]: 81120 : if (!accepted_header)
# 3525 : 517 : return false;
# 3526 : :
# 3527 : : // Try to process all requested blocks that we don't have, but only
# 3528 : : // process an unrequested block if it's new and has enough work to
# 3529 : : // advance our tip, and isn't too many blocks ahead.
# 3530 : 80603 : bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
# 3531 [ + + ]: 80603 : bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
# 3532 : : // Blocks that are too out-of-order needlessly limit the effectiveness of
# 3533 : : // pruning, because pruning will not delete block files that contain any
# 3534 : : // blocks which are too close in height to the tip. Apply this test
# 3535 : : // regardless of whether pruning is enabled; it should generally be safe to
# 3536 : : // not process unrequested blocks.
# 3537 : 80603 : bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
# 3538 : :
# 3539 : : // TODO: Decouple this function from the block download logic by removing fRequested
# 3540 : : // This requires some new chain data structure to efficiently look up if a
# 3541 : : // block is in a chain leading to a candidate for best tip, despite not
# 3542 : : // being such a candidate itself.
# 3543 : :
# 3544 : : // TODO: deal better with return value and error conditions for duplicate
# 3545 : : // and unrequested blocks.
# 3546 [ + + ]: 80603 : if (fAlreadyHave) return true;
# 3547 [ + + ]: 70309 : if (!fRequested) { // If we didn't ask for it:
# 3548 [ - + ]: 1702 : if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
# 3549 [ + + ]: 1702 : if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
# 3550 [ + + ]: 610 : if (fTooFarAhead) return true; // Block height is too high
# 3551 : :
# 3552 : : // Protect against DoS attacks from low-work chains.
# 3553 : : // If our tip is behind, a peer could try to send us
# 3554 : : // low-work blocks on a fake chain that we would never
# 3555 : : // request; don't process these.
# 3556 [ + + ]: 609 : if (pindex->nChainWork < nMinimumChainWork) return true;
# 3557 : 69215 : }
# 3558 : :
# 3559 [ - + ]: 69215 : if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
# 3560 [ + + ]: 69215 : !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
# 3561 [ + - ][ + + ]: 161 : if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 3562 : 8 : pindex->nStatus |= BLOCK_FAILED_VALID;
# 3563 : 8 : setDirtyBlockIndex.insert(pindex);
# 3564 : 8 : }
# 3565 : 161 : return error("%s: %s", __func__, state.ToString());
# 3566 : 161 : }
# 3567 : :
# 3568 : : // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
# 3569 : : // (but if it does not build on our best tip, let the SendMessages loop relay it)
# 3570 [ + + ][ + + ]: 69054 : if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
# 3571 : 58957 : GetMainSignals().NewPoWValidBlock(pindex, pblock);
# 3572 : :
# 3573 : : // Write block to history file
# 3574 [ + + ]: 69054 : if (fNewBlock) *fNewBlock = true;
# 3575 : 69054 : assert(std::addressof(::ChainActive()) == std::addressof(m_chain));
# 3576 : 69054 : try {
# 3577 : 69054 : FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, m_chain, chainparams, dbp);
# 3578 [ - + ]: 69054 : if (blockPos.IsNull()) {
# 3579 : 0 : state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
# 3580 : 0 : return false;
# 3581 : 0 : }
# 3582 : 69054 : ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
# 3583 : 69054 : } catch (const std::runtime_error& e) {
# 3584 : 0 : return AbortNode(state, std::string("System error: ") + e.what());
# 3585 : 0 : }
# 3586 : :
# 3587 : 69054 : FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
# 3588 : :
# 3589 : 69054 : CheckBlockIndex(chainparams.GetConsensus());
# 3590 : :
# 3591 : 69054 : return true;
# 3592 : 69054 : }
# 3593 : :
# 3594 : : bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
# 3595 : 79826 : {
# 3596 : 79826 : AssertLockNotHeld(cs_main);
# 3597 : 79826 : assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate()));
# 3598 : :
# 3599 : 79826 : {
# 3600 : 79826 : CBlockIndex *pindex = nullptr;
# 3601 [ + + ]: 79826 : if (fNewBlock) *fNewBlock = false;
# 3602 : 79826 : BlockValidationState state;
# 3603 : :
# 3604 : : // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
# 3605 : : // Therefore, the following critical section must include the CheckBlock() call as well.
# 3606 : 79826 : LOCK(cs_main);
# 3607 : :
# 3608 : : // Ensure that CheckBlock() passes before calling AcceptBlock, as
# 3609 : : // belt-and-suspenders.
# 3610 : 79826 : bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
# 3611 [ + + ]: 79826 : if (ret) {
# 3612 : : // Store to disk
# 3613 : 79422 : ret = ActiveChainstate().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
# 3614 : 79422 : }
# 3615 [ + + ]: 79826 : if (!ret) {
# 3616 : 939 : GetMainSignals().BlockChecked(*pblock, state);
# 3617 : 939 : return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
# 3618 : 939 : }
# 3619 : 78887 : }
# 3620 : :
# 3621 : 78887 : NotifyHeaderTip(ActiveChainstate());
# 3622 : :
# 3623 : 78887 : BlockValidationState state; // Only used to report errors, not invalidity - ignore it
# 3624 [ + + ]: 78887 : if (!ActiveChainstate().ActivateBestChain(state, chainparams, pblock))
# 3625 : 1 : return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
# 3626 : :
# 3627 : 78886 : return true;
# 3628 : 78886 : }
# 3629 : :
# 3630 : : bool TestBlockValidity(BlockValidationState& state,
# 3631 : : const CChainParams& chainparams,
# 3632 : : CChainState& chainstate,
# 3633 : : const CBlock& block,
# 3634 : : CBlockIndex* pindexPrev,
# 3635 : : bool fCheckPOW,
# 3636 : : bool fCheckMerkleRoot)
# 3637 : 29980 : {
# 3638 : 29980 : AssertLockHeld(cs_main);
# 3639 : 29980 : assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate));
# 3640 : 29980 : assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
# 3641 : 29980 : CCoinsViewCache viewNew(&chainstate.CoinsTip());
# 3642 : 29980 : uint256 block_hash(block.GetHash());
# 3643 : 29980 : CBlockIndex indexDummy(block);
# 3644 : 29980 : indexDummy.pprev = pindexPrev;
# 3645 : 29980 : indexDummy.nHeight = pindexPrev->nHeight + 1;
# 3646 : 29980 : indexDummy.phashBlock = &block_hash;
# 3647 : :
# 3648 : : // NOTE: CheckBlockHeader is called by CheckBlock
# 3649 : 29980 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainstate.m_blockman));
# 3650 [ + + ]: 29980 : if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime()))
# 3651 : 3 : return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
# 3652 [ + + ]: 29977 : if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
# 3653 : 7 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 3654 [ + + ]: 29970 : if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
# 3655 : 1 : return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
# 3656 [ + + ]: 29969 : if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
# 3657 : 8 : return false;
# 3658 : 29961 : assert(state.IsValid());
# 3659 : :
# 3660 : 29961 : return true;
# 3661 : 29961 : }
# 3662 : :
# 3663 : : /**
# 3664 : : * BLOCK PRUNING CODE
# 3665 : : */
# 3666 : :
# 3667 : : void BlockManager::PruneOneBlockFile(const int fileNumber)
# 3668 : 7 : {
# 3669 : 7 : AssertLockHeld(cs_main);
# 3670 : 7 : LOCK(cs_LastBlockFile);
# 3671 : :
# 3672 [ + + ]: 6110 : for (const auto& entry : m_block_index) {
# 3673 : 6110 : CBlockIndex* pindex = entry.second;
# 3674 [ + + ]: 6110 : if (pindex->nFile == fileNumber) {
# 3675 : 1201 : pindex->nStatus &= ~BLOCK_HAVE_DATA;
# 3676 : 1201 : pindex->nStatus &= ~BLOCK_HAVE_UNDO;
# 3677 : 1201 : pindex->nFile = 0;
# 3678 : 1201 : pindex->nDataPos = 0;
# 3679 : 1201 : pindex->nUndoPos = 0;
# 3680 : 1201 : setDirtyBlockIndex.insert(pindex);
# 3681 : :
# 3682 : : // Prune from m_blocks_unlinked -- any block we prune would have
# 3683 : : // to be downloaded again in order to consider its chain, at which
# 3684 : : // point it would be considered as a candidate for
# 3685 : : // m_blocks_unlinked or setBlockIndexCandidates.
# 3686 : 1201 : auto range = m_blocks_unlinked.equal_range(pindex->pprev);
# 3687 [ - + ]: 1201 : while (range.first != range.second) {
# 3688 : 0 : std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
# 3689 : 0 : range.first++;
# 3690 [ # # ]: 0 : if (_it->second == pindex) {
# 3691 : 0 : m_blocks_unlinked.erase(_it);
# 3692 : 0 : }
# 3693 : 0 : }
# 3694 : 1201 : }
# 3695 : 6110 : }
# 3696 : :
# 3697 : 7 : vinfoBlockFile[fileNumber].SetNull();
# 3698 : 7 : setDirtyFileInfo.insert(fileNumber);
# 3699 : 7 : }
# 3700 : :
# 3701 : : void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
# 3702 : 2 : {
# 3703 : 2 : assert(fPruneMode && nManualPruneHeight > 0);
# 3704 : :
# 3705 : 2 : LOCK2(cs_main, cs_LastBlockFile);
# 3706 [ - + ]: 2 : if (chain_tip_height < 0) {
# 3707 : 0 : return;
# 3708 : 0 : }
# 3709 : :
# 3710 : : // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
# 3711 : 2 : unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
# 3712 : 2 : int count = 0;
# 3713 [ + + ]: 10 : for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
# 3714 [ + + ][ + + ]: 8 : if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
# 3715 : 4 : continue;
# 3716 : 4 : }
# 3717 : 4 : PruneOneBlockFile(fileNumber);
# 3718 : 4 : setFilesToPrune.insert(fileNumber);
# 3719 : 4 : count++;
# 3720 : 4 : }
# 3721 : 2 : LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
# 3722 : 2 : }
# 3723 : :
# 3724 : : /* This function is called from the RPC code for pruneblockchain */
# 3725 : : void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
# 3726 : 2 : {
# 3727 : 2 : BlockValidationState state;
# 3728 : 2 : const CChainParams& chainparams = Params();
# 3729 : 2 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 3730 [ - + ]: 2 : if (!active_chainstate.FlushStateToDisk(
# 3731 : 2 : chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
# 3732 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 3733 : 0 : }
# 3734 : 2 : }
# 3735 : :
# 3736 : : void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
# 3737 : 39 : {
# 3738 : 39 : LOCK2(cs_main, cs_LastBlockFile);
# 3739 [ + + ][ - + ]: 39 : if (chain_tip_height < 0 || nPruneTarget == 0) {
# 3740 : 2 : return;
# 3741 : 2 : }
# 3742 [ + + ]: 37 : if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
# 3743 : 5 : return;
# 3744 : 5 : }
# 3745 : :
# 3746 : 32 : unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
# 3747 : 32 : uint64_t nCurrentUsage = CalculateCurrentUsage();
# 3748 : : // We don't check to prune until after we've allocated new space for files
# 3749 : : // So we should leave a buffer under our target to account for another allocation
# 3750 : : // before the next pruning.
# 3751 : 32 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
# 3752 : 32 : uint64_t nBytesToPrune;
# 3753 : 32 : int count = 0;
# 3754 : :
# 3755 [ - + ]: 32 : if (nCurrentUsage + nBuffer >= nPruneTarget) {
# 3756 : : // On a prune event, the chainstate DB is flushed.
# 3757 : : // To avoid excessive prune events negating the benefit of high dbcache
# 3758 : : // values, we should not prune too rapidly.
# 3759 : : // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
# 3760 [ # # ]: 0 : if (is_ibd) {
# 3761 : : // Since this is only relevant during IBD, we use a fixed 10%
# 3762 : 0 : nBuffer += nPruneTarget / 10;
# 3763 : 0 : }
# 3764 : :
# 3765 [ # # ]: 0 : for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
# 3766 : 0 : nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
# 3767 : :
# 3768 [ # # ]: 0 : if (vinfoBlockFile[fileNumber].nSize == 0) {
# 3769 : 0 : continue;
# 3770 : 0 : }
# 3771 : :
# 3772 [ # # ]: 0 : if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
# 3773 : 0 : break;
# 3774 : 0 : }
# 3775 : :
# 3776 : : // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
# 3777 [ # # ]: 0 : if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
# 3778 : 0 : continue;
# 3779 : 0 : }
# 3780 : :
# 3781 : 0 : PruneOneBlockFile(fileNumber);
# 3782 : : // Queue up the files for removal
# 3783 : 0 : setFilesToPrune.insert(fileNumber);
# 3784 : 0 : nCurrentUsage -= nBytesToPrune;
# 3785 : 0 : count++;
# 3786 : 0 : }
# 3787 : 0 : }
# 3788 : :
# 3789 [ + - ]: 32 : LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
# 3790 : 32 : nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
# 3791 : 32 : ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
# 3792 : 32 : nLastBlockWeCanPrune, count);
# 3793 : 32 : }
# 3794 : :
# 3795 : : CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
# 3796 : 124720 : {
# 3797 : 124720 : AssertLockHeld(cs_main);
# 3798 : :
# 3799 [ + + ]: 124720 : if (hash.IsNull())
# 3800 : 380 : return nullptr;
# 3801 : :
# 3802 : : // Return existing
# 3803 : 124340 : BlockMap::iterator mi = m_block_index.find(hash);
# 3804 [ + + ]: 124340 : if (mi != m_block_index.end())
# 3805 : 61980 : return (*mi).second;
# 3806 : :
# 3807 : : // Create new
# 3808 : 62360 : CBlockIndex* pindexNew = new CBlockIndex();
# 3809 : 62360 : mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
# 3810 : 62360 : pindexNew->phashBlock = &((*mi).first);
# 3811 : :
# 3812 : 62360 : return pindexNew;
# 3813 : 62360 : }
# 3814 : :
# 3815 : : bool BlockManager::LoadBlockIndex(
# 3816 : : const Consensus::Params& consensus_params,
# 3817 : : CBlockTreeDB& blocktree,
# 3818 : : std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
# 3819 : 616 : {
# 3820 [ - + ]: 124720 : if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
# 3821 : 0 : return false;
# 3822 : :
# 3823 : : // Calculate nChainWork
# 3824 : 616 : std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
# 3825 : 616 : vSortedByHeight.reserve(m_block_index.size());
# 3826 [ + + ]: 616 : for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
# 3827 : 62360 : {
# 3828 : 62360 : CBlockIndex* pindex = item.second;
# 3829 : 62360 : vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
# 3830 : 62360 : }
# 3831 : 616 : sort(vSortedByHeight.begin(), vSortedByHeight.end());
# 3832 [ + + ]: 616 : for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
# 3833 : 62360 : {
# 3834 [ - + ]: 62360 : if (ShutdownRequested()) return false;
# 3835 : 62360 : CBlockIndex* pindex = item.second;
# 3836 [ + + ]: 62360 : pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
# 3837 [ + + ]: 62360 : pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
# 3838 : : // We can link the chain of blocks for which we've received transactions at some point.
# 3839 : : // Pruned nodes may have deleted the block.
# 3840 [ + + ]: 62360 : if (pindex->nTx > 0) {
# 3841 [ + + ]: 61809 : if (pindex->pprev) {
# 3842 [ + - ]: 61429 : if (pindex->pprev->HaveTxsDownloaded()) {
# 3843 : 61429 : pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
# 3844 : 61429 : } else {
# 3845 : 0 : pindex->nChainTx = 0;
# 3846 : 0 : m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
# 3847 : 0 : }
# 3848 : 61429 : } else {
# 3849 : 380 : pindex->nChainTx = pindex->nTx;
# 3850 : 380 : }
# 3851 : 61809 : }
# 3852 [ + + ][ + + ]: 62360 : if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
# [ - + ]
# 3853 : 0 : pindex->nStatus |= BLOCK_FAILED_CHILD;
# 3854 : 0 : setDirtyBlockIndex.insert(pindex);
# 3855 : 0 : }
# 3856 [ + + ][ + - ]: 62360 : if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
# [ # # ]
# 3857 : 61799 : block_index_candidates.insert(pindex);
# 3858 : 61799 : }
# 3859 [ + + ][ + - ]: 62360 : if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
# [ # # ]
# 3860 : 10 : pindexBestInvalid = pindex;
# 3861 [ + + ]: 62360 : if (pindex->pprev)
# 3862 : 61980 : pindex->BuildSkip();
# 3863 [ + + ][ + + ]: 62360 : if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
# [ + + ][ + + ]
# 3864 : 62340 : pindexBestHeader = pindex;
# 3865 : 62360 : }
# 3866 : :
# 3867 : 616 : return true;
# 3868 : 616 : }
# 3869 : :
# 3870 : 787 : void BlockManager::Unload() {
# 3871 : 787 : m_failed_blocks.clear();
# 3872 : 787 : m_blocks_unlinked.clear();
# 3873 : :
# 3874 [ + + ]: 5236 : for (const BlockMap::value_type& entry : m_block_index) {
# 3875 : 5236 : delete entry.second;
# 3876 : 5236 : }
# 3877 : :
# 3878 : 787 : m_block_index.clear();
# 3879 : 787 : }
# 3880 : :
# 3881 : : bool CChainState::LoadBlockIndexDB(const CChainParams& chainparams)
# 3882 : 616 : {
# 3883 : 616 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 3884 [ - + ]: 616 : if (!m_blockman.LoadBlockIndex(
# 3885 : 616 : chainparams.GetConsensus(), *pblocktree,
# 3886 : 616 : setBlockIndexCandidates)) {
# 3887 : 0 : return false;
# 3888 : 0 : }
# 3889 : :
# 3890 : : // Load block file info
# 3891 : 616 : pblocktree->ReadLastBlockFile(nLastBlockFile);
# 3892 : 616 : vinfoBlockFile.resize(nLastBlockFile + 1);
# 3893 : 616 : LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
# 3894 [ + + ]: 1240 : for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
# 3895 : 624 : pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
# 3896 : 624 : }
# 3897 : 616 : LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
# 3898 : 616 : for (int nFile = nLastBlockFile + 1; true; nFile++) {
# 3899 : 616 : CBlockFileInfo info;
# 3900 [ - + ]: 616 : if (pblocktree->ReadBlockFileInfo(nFile, info)) {
# 3901 : 0 : vinfoBlockFile.push_back(info);
# 3902 : 616 : } else {
# 3903 : 616 : break;
# 3904 : 616 : }
# 3905 : 616 : }
# 3906 : :
# 3907 : : // Check presence of blk files
# 3908 : 616 : LogPrintf("Checking all blk files are present...\n");
# 3909 : 616 : std::set<int> setBlkDataFiles;
# 3910 [ + + ]: 62360 : for (const std::pair<const uint256, CBlockIndex*>& item : m_blockman.m_block_index) {
# 3911 : 62360 : CBlockIndex* pindex = item.second;
# 3912 [ + + ]: 62360 : if (pindex->nStatus & BLOCK_HAVE_DATA) {
# 3913 : 60563 : setBlkDataFiles.insert(pindex->nFile);
# 3914 : 60563 : }
# 3915 : 62360 : }
# 3916 [ + + ]: 999 : for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
# 3917 : 383 : {
# 3918 : 383 : FlatFilePos pos(*it, 0);
# 3919 [ - + ]: 383 : if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
# 3920 : 0 : return false;
# 3921 : 0 : }
# 3922 : 383 : }
# 3923 : :
# 3924 : : // Check whether we have ever pruned block & undo files
# 3925 : 616 : pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
# 3926 [ + + ]: 616 : if (fHavePruned)
# 3927 : 616 : LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
# 3928 : :
# 3929 : : // Check whether we need to continue reindexing
# 3930 : 616 : bool fReindexing = false;
# 3931 : 616 : pblocktree->ReadReindexing(fReindexing);
# 3932 [ - + ]: 616 : if(fReindexing) fReindex = true;
# 3933 : :
# 3934 : 616 : return true;
# 3935 : 616 : }
# 3936 : :
# 3937 : : void CChainState::LoadMempool(const ArgsManager& args)
# 3938 : 621 : {
# 3939 [ + + ]: 621 : if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
# 3940 : 615 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 3941 : 615 : ::LoadMempool(m_mempool, *this);
# 3942 : 615 : }
# 3943 : 621 : m_mempool.SetIsLoaded(!ShutdownRequested());
# 3944 : 621 : }
# 3945 : :
# 3946 : : bool CChainState::LoadChainTip(const CChainParams& chainparams)
# 3947 : 379 : {
# 3948 : 379 : AssertLockHeld(cs_main);
# 3949 : 379 : const CCoinsViewCache& coins_cache = CoinsTip();
# 3950 : 379 : assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
# 3951 : 379 : const CBlockIndex* tip = m_chain.Tip();
# 3952 : :
# 3953 [ + + ][ + + ]: 379 : if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
# [ + - ]
# 3954 : 1 : return true;
# 3955 : 1 : }
# 3956 : :
# 3957 : : // Load pointer to end of best chain
# 3958 : 378 : CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
# 3959 [ - + ]: 378 : if (!pindex) {
# 3960 : 0 : return false;
# 3961 : 0 : }
# 3962 : 378 : m_chain.SetTip(pindex);
# 3963 : 378 : PruneBlockIndexCandidates();
# 3964 : :
# 3965 : 378 : tip = m_chain.Tip();
# 3966 : 378 : LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
# 3967 : 378 : tip->GetBlockHash().ToString(),
# 3968 : 378 : m_chain.Height(),
# 3969 : 378 : FormatISO8601DateTime(tip->GetBlockTime()),
# 3970 : 378 : GuessVerificationProgress(chainparams.TxData(), tip));
# 3971 : 378 : return true;
# 3972 : 378 : }
# 3973 : :
# 3974 : : CVerifyDB::CVerifyDB()
# 3975 : 378 : {
# 3976 : 378 : uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
# 3977 : 378 : }
# 3978 : :
# 3979 : : CVerifyDB::~CVerifyDB()
# 3980 : 378 : {
# 3981 : 378 : uiInterface.ShowProgress("", 100, false);
# 3982 : 378 : }
# 3983 : :
# 3984 : : bool CVerifyDB::VerifyDB(
# 3985 : : CChainState& chainstate,
# 3986 : : const CChainParams& chainparams,
# 3987 : : CCoinsView& coinsview,
# 3988 : : int nCheckLevel, int nCheckDepth)
# 3989 : 378 : {
# 3990 : 378 : AssertLockHeld(cs_main);
# 3991 : :
# 3992 : 378 : assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate));
# 3993 [ - + ][ + + ]: 378 : if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr)
# 3994 : 55 : return true;
# 3995 : :
# 3996 : : // Verify blocks in the best chain
# 3997 [ + + ][ + + ]: 323 : if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height())
# 3998 : 20 : nCheckDepth = chainstate.m_chain.Height();
# 3999 : 323 : nCheckLevel = std::max(0, std::min(4, nCheckLevel));
# 4000 : 323 : LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
# 4001 : 323 : CCoinsViewCache coins(&coinsview);
# 4002 : 323 : CBlockIndex* pindex;
# 4003 : 323 : CBlockIndex* pindexFailure = nullptr;
# 4004 : 323 : int nGoodTransactions = 0;
# 4005 : 323 : BlockValidationState state;
# 4006 : 323 : int reportDone = 0;
# 4007 : 323 : LogPrintf("[0%%]..."); /* Continued */
# 4008 : :
# 4009 : 323 : const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
# 4010 : :
# 4011 [ + - ][ + + ]: 2370 : for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
# 4012 [ + + ]: 2351 : const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
# 4013 [ + + ]: 2351 : if (reportDone < percentageDone/10) {
# 4014 : : // report every 10% step
# 4015 : 1825 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 4016 : 1825 : reportDone = percentageDone/10;
# 4017 : 1825 : }
# 4018 : 2351 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 4019 [ + + ]: 2351 : if (pindex->nHeight <= chainstate.m_chain.Height()-nCheckDepth)
# 4020 : 303 : break;
# 4021 [ + + ][ + - ]: 2048 : if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
# [ - + ]
# 4022 : : // If pruning or running under an assumeutxo snapshot, only go
# 4023 : : // back as far as we have data.
# 4024 : 0 : LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
# 4025 : 0 : break;
# 4026 : 0 : }
# 4027 : 2048 : CBlock block;
# 4028 : : // check level 0: read from disk
# 4029 [ - + ]: 2048 : if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
# 4030 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4031 : : // check level 1: verify block validity
# 4032 [ + - ][ - + ]: 2048 : if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
# 4033 : 0 : return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
# 4034 : 0 : pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 4035 : : // check level 2: verify undo validity
# 4036 [ + - ][ + - ]: 2048 : if (nCheckLevel >= 2 && pindex) {
# 4037 : 2048 : CBlockUndo undo;
# 4038 [ + - ]: 2048 : if (!pindex->GetUndoPos().IsNull()) {
# 4039 [ + + ]: 2048 : if (!UndoReadFromDisk(undo, pindex)) {
# 4040 : 1 : return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4041 : 1 : }
# 4042 : 2047 : }
# 4043 : 2048 : }
# 4044 : : // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
# 4045 : 2047 : size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
# 4046 : :
# 4047 [ + - ][ + - ]: 2047 : if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
# 4048 : 2047 : assert(coins.GetBestBlock() == pindex->GetBlockHash());
# 4049 : 2047 : DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
# 4050 [ - + ]: 2047 : if (res == DISCONNECT_FAILED) {
# 4051 : 0 : return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4052 : 0 : }
# 4053 [ - + ]: 2047 : if (res == DISCONNECT_UNCLEAN) {
# 4054 : 0 : nGoodTransactions = 0;
# 4055 : 0 : pindexFailure = pindex;
# 4056 : 2047 : } else {
# 4057 : 2047 : nGoodTransactions += block.vtx.size();
# 4058 : 2047 : }
# 4059 : 2047 : }
# 4060 [ - + ]: 2047 : if (ShutdownRequested()) return true;
# 4061 : 2047 : }
# 4062 [ - + ]: 323 : if (pindexFailure)
# 4063 : 0 : return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
# 4064 : :
# 4065 : : // store block count as we move pindex at check level >= 4
# 4066 : 322 : int block_count = chainstate.m_chain.Height() - pindex->nHeight;
# 4067 : :
# 4068 : : // check level 4: try reconnecting blocks
# 4069 [ + + ]: 322 : if (nCheckLevel >= 4) {
# 4070 [ + + ]: 209 : while (pindex != chainstate.m_chain.Tip()) {
# 4071 : 208 : const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
# 4072 [ + + ]: 208 : if (reportDone < percentageDone/10) {
# 4073 : : // report every 10% step
# 4074 : 5 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 4075 : 5 : reportDone = percentageDone/10;
# 4076 : 5 : }
# 4077 : 208 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 4078 : 208 : pindex = chainstate.m_chain.Next(pindex);
# 4079 : 208 : CBlock block;
# 4080 [ - + ]: 208 : if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
# 4081 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4082 [ - + ]: 208 : if (!chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
# 4083 : 0 : return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 4084 [ - + ]: 208 : if (ShutdownRequested()) return true;
# 4085 : 208 : }
# 4086 : 1 : }
# 4087 : :
# 4088 : 322 : LogPrintf("[DONE].\n");
# 4089 : 322 : LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
# 4090 : :
# 4091 : 322 : return true;
# 4092 : 322 : }
# 4093 : :
# 4094 : : /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
# 4095 : : bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
# 4096 : 0 : {
# 4097 : : // TODO: merge with ConnectBlock
# 4098 : 0 : CBlock block;
# 4099 [ # # ]: 0 : if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
# 4100 : 0 : return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4101 : 0 : }
# 4102 : :
# 4103 [ # # ]: 0 : for (const CTransactionRef& tx : block.vtx) {
# 4104 [ # # ]: 0 : if (!tx->IsCoinBase()) {
# 4105 [ # # ]: 0 : for (const CTxIn &txin : tx->vin) {
# 4106 : 0 : inputs.SpendCoin(txin.prevout);
# 4107 : 0 : }
# 4108 : 0 : }
# 4109 : : // Pass check = true as every addition may be an overwrite.
# 4110 : 0 : AddCoins(inputs, *tx, pindex->nHeight, true);
# 4111 : 0 : }
# 4112 : 0 : return true;
# 4113 : 0 : }
# 4114 : :
# 4115 : : bool CChainState::ReplayBlocks(const CChainParams& params)
# 4116 : 626 : {
# 4117 : 626 : LOCK(cs_main);
# 4118 : :
# 4119 : 626 : CCoinsView& db = this->CoinsDB();
# 4120 : 626 : CCoinsViewCache cache(&db);
# 4121 : :
# 4122 : 626 : std::vector<uint256> hashHeads = db.GetHeadBlocks();
# 4123 [ + - ]: 626 : if (hashHeads.empty()) return true; // We're already in a consistent state.
# 4124 [ # # ]: 0 : if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
# 4125 : :
# 4126 : 0 : uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
# 4127 : 0 : LogPrintf("Replaying blocks\n");
# 4128 : :
# 4129 : 0 : const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
# 4130 : 0 : const CBlockIndex* pindexNew; // New tip during the interrupted flush.
# 4131 : 0 : const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
# 4132 : :
# 4133 [ # # ]: 0 : if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
# 4134 : 0 : return error("ReplayBlocks(): reorganization to unknown block requested");
# 4135 : 0 : }
# 4136 : 0 : pindexNew = m_blockman.m_block_index[hashHeads[0]];
# 4137 : :
# 4138 [ # # ]: 0 : if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
# 4139 [ # # ]: 0 : if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
# 4140 : 0 : return error("ReplayBlocks(): reorganization from unknown block requested");
# 4141 : 0 : }
# 4142 : 0 : pindexOld = m_blockman.m_block_index[hashHeads[1]];
# 4143 : 0 : pindexFork = LastCommonAncestor(pindexOld, pindexNew);
# 4144 : 0 : assert(pindexFork != nullptr);
# 4145 : 0 : }
# 4146 : :
# 4147 : : // Rollback along the old branch.
# 4148 [ # # ]: 0 : while (pindexOld != pindexFork) {
# 4149 [ # # ]: 0 : if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
# 4150 : 0 : CBlock block;
# 4151 [ # # ]: 0 : if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
# 4152 : 0 : return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
# 4153 : 0 : }
# 4154 : 0 : LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
# 4155 : 0 : DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
# 4156 [ # # ]: 0 : if (res == DISCONNECT_FAILED) {
# 4157 : 0 : return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
# 4158 : 0 : }
# 4159 : : // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
# 4160 : : // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
# 4161 : : // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
# 4162 : : // the result is still a version of the UTXO set with the effects of that block undone.
# 4163 : 0 : }
# 4164 : 0 : pindexOld = pindexOld->pprev;
# 4165 : 0 : }
# 4166 : :
# 4167 : : // Roll forward from the forking point to the new tip.
# 4168 [ # # ]: 0 : int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
# 4169 [ # # ]: 0 : for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
# 4170 : 0 : const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
# 4171 : 0 : LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
# 4172 : 0 : uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
# 4173 [ # # ]: 0 : if (!RollforwardBlock(pindex, cache, params)) return false;
# 4174 : 0 : }
# 4175 : :
# 4176 : 0 : cache.SetBestBlock(pindexNew->GetBlockHash());
# 4177 : 0 : cache.Flush();
# 4178 : 0 : uiInterface.ShowProgress("", 100, false);
# 4179 : 0 : return true;
# 4180 : 0 : }
# 4181 : :
# 4182 : : bool CChainState::NeedsRedownload(const CChainParams& params) const
# 4183 : 616 : {
# 4184 : 616 : AssertLockHeld(cs_main);
# 4185 : :
# 4186 : : // At and above params.SegwitHeight, segwit consensus rules must be validated
# 4187 : 616 : CBlockIndex* block{m_chain.Tip()};
# 4188 : 616 : const int segwit_height{params.GetConsensus().SegwitHeight};
# 4189 : :
# 4190 [ + + ][ + + ]: 61251 : while (block != nullptr && block->nHeight >= segwit_height) {
# 4191 [ + + ]: 60636 : if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
# 4192 : : // block is insufficiently validated for a segwit client
# 4193 : 1 : return true;
# 4194 : 1 : }
# 4195 : 60635 : block = block->pprev;
# 4196 : 60635 : }
# 4197 : :
# 4198 : 616 : return false;
# 4199 : 616 : }
# 4200 : :
# 4201 : 789 : void CChainState::UnloadBlockIndex() {
# 4202 : 789 : nBlockSequenceId = 1;
# 4203 : 789 : setBlockIndexCandidates.clear();
# 4204 : 789 : }
# 4205 : :
# 4206 : : // May NOT be used after any connections are up as much
# 4207 : : // of the peer-processing logic assumes a consistent
# 4208 : : // block index state
# 4209 : : void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
# 4210 : 783 : {
# 4211 : 783 : LOCK(cs_main);
# 4212 : 783 : chainman.Unload();
# 4213 : 783 : pindexBestInvalid = nullptr;
# 4214 : 783 : pindexBestHeader = nullptr;
# 4215 [ + - ]: 783 : if (mempool) mempool->clear();
# 4216 : 783 : vinfoBlockFile.clear();
# 4217 : 783 : nLastBlockFile = 0;
# 4218 : 783 : setDirtyBlockIndex.clear();
# 4219 : 783 : setDirtyFileInfo.clear();
# 4220 : 783 : versionbitscache.Clear();
# 4221 [ + + ]: 23490 : for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
# 4222 : 22707 : warningcache[b].clear();
# 4223 : 22707 : }
# 4224 : 783 : fHavePruned = false;
# 4225 : 783 : }
# 4226 : :
# 4227 : : bool ChainstateManager::LoadBlockIndex(const CChainParams& chainparams)
# 4228 : 626 : {
# 4229 : 626 : AssertLockHeld(cs_main);
# 4230 : : // Load block index from databases
# 4231 : 626 : bool needs_init = fReindex;
# 4232 [ + + ]: 626 : if (!fReindex) {
# 4233 : 616 : bool ret = ActiveChainstate().LoadBlockIndexDB(chainparams);
# 4234 [ - + ]: 616 : if (!ret) return false;
# 4235 : 616 : needs_init = m_blockman.m_block_index.empty();
# 4236 : 616 : }
# 4237 : :
# 4238 [ + + ]: 626 : if (needs_init) {
# 4239 : : // Everything here is for *new* reindex/DBs. Thus, though
# 4240 : : // LoadBlockIndexDB may have set fReindex if we shut down
# 4241 : : // mid-reindex previously, we don't check fReindex and
# 4242 : : // instead only check it prior to LoadBlockIndexDB to set
# 4243 : : // needs_init.
# 4244 : :
# 4245 : 246 : LogPrintf("Initializing databases...\n");
# 4246 : 246 : }
# 4247 : 626 : return true;
# 4248 : 626 : }
# 4249 : :
# 4250 : : bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
# 4251 : 784 : {
# 4252 : 784 : LOCK(cs_main);
# 4253 : :
# 4254 : : // Check whether we're already initialized by checking for genesis in
# 4255 : : // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
# 4256 : : // set based on the coins db, not the block index db, which is the only
# 4257 : : // thing loaded at this point.
# 4258 [ + + ]: 784 : if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
# 4259 : 390 : return true;
# 4260 : :
# 4261 : 394 : assert(std::addressof(::ChainActive()) == std::addressof(m_chain));
# 4262 : 394 : try {
# 4263 : 394 : const CBlock& block = chainparams.GenesisBlock();
# 4264 : 394 : FlatFilePos blockPos = SaveBlockToDisk(block, 0, m_chain, chainparams, nullptr);
# 4265 [ - + ]: 394 : if (blockPos.IsNull())
# 4266 : 0 : return error("%s: writing genesis block to disk failed", __func__);
# 4267 : 394 : CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
# 4268 : 394 : ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
# 4269 : 394 : } catch (const std::runtime_error& e) {
# 4270 : 0 : return error("%s: failed to write genesis block: %s", __func__, e.what());
# 4271 : 0 : }
# 4272 : :
# 4273 : 394 : return true;
# 4274 : 394 : }
# 4275 : :
# 4276 : : void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp)
# 4277 : 10 : {
# 4278 : : // Map of disk positions for blocks with unknown parent (only used for reindex)
# 4279 : 10 : static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
# 4280 : 10 : int64_t nStart = GetTimeMillis();
# 4281 : :
# 4282 : 10 : int nLoaded = 0;
# 4283 : 10 : try {
# 4284 : : // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
# 4285 : 10 : CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
# 4286 : 10 : uint64_t nRewind = blkdat.GetPos();
# 4287 [ + + ]: 1709 : while (!blkdat.eof()) {
# 4288 [ - + ]: 1708 : if (ShutdownRequested()) return;
# 4289 : :
# 4290 : 1708 : blkdat.SetPos(nRewind);
# 4291 : 1708 : nRewind++; // start one byte further next time, in case of failure
# 4292 : 1708 : blkdat.SetLimit(); // remove former limit
# 4293 : 1708 : unsigned int nSize = 0;
# 4294 : 1708 : try {
# 4295 : : // locate a header
# 4296 : 1708 : unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
# 4297 : 1708 : blkdat.FindByte(chainparams.MessageStart()[0]);
# 4298 : 1708 : nRewind = blkdat.GetPos()+1;
# 4299 : 1708 : blkdat >> buf;
# 4300 [ - + ]: 1708 : if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
# 4301 : 0 : continue;
# 4302 : : // read size
# 4303 : 1708 : blkdat >> nSize;
# 4304 [ + + ][ - + ]: 1708 : if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
# 4305 : 0 : continue;
# 4306 : 9 : } catch (const std::exception&) {
# 4307 : : // no valid block header found; don't complain
# 4308 : 9 : break;
# 4309 : 9 : }
# 4310 : 1699 : try {
# 4311 : : // read block
# 4312 : 1699 : uint64_t nBlockPos = blkdat.GetPos();
# 4313 [ + + ]: 1699 : if (dbp)
# 4314 : 1598 : dbp->nPos = nBlockPos;
# 4315 : 1699 : blkdat.SetLimit(nBlockPos + nSize);
# 4316 : 1699 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
# 4317 : 1699 : CBlock& block = *pblock;
# 4318 : 1699 : blkdat >> block;
# 4319 : 1699 : nRewind = blkdat.GetPos();
# 4320 : :
# 4321 : 1699 : uint256 hash = block.GetHash();
# 4322 : 1699 : {
# 4323 : 1699 : LOCK(cs_main);
# 4324 : : // detect out of order blocks, and store them for later
# 4325 : 1699 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman));
# 4326 [ + + ][ + + ]: 1699 : if (hash != chainparams.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
# 4327 [ + - ]: 9 : LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
# 4328 : 9 : block.hashPrevBlock.ToString());
# 4329 [ + - ]: 9 : if (dbp)
# 4330 : 9 : mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
# 4331 : 9 : continue;
# 4332 : 9 : }
# 4333 : :
# 4334 : : // process in case the block isn't known yet
# 4335 : 1690 : assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman));
# 4336 : 1690 : CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
# 4337 [ + + ][ - + ]: 1690 : if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
# 4338 : 1689 : BlockValidationState state;
# 4339 : 1689 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 4340 [ + + ]: 1689 : if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
# 4341 : 1547 : nLoaded++;
# 4342 : 1547 : }
# 4343 [ - + ]: 1689 : if (state.IsError()) {
# 4344 : 0 : break;
# 4345 : 0 : }
# 4346 [ - + ][ # # ]: 1 : } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
# 4347 [ # # ]: 0 : LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
# 4348 : 0 : }
# 4349 : 1690 : }
# 4350 : :
# 4351 : : // Activate the genesis block so normal node progress can continue
# 4352 [ + + ]: 1690 : if (hash == chainparams.GetConsensus().hashGenesisBlock) {
# 4353 : 10 : BlockValidationState state;
# 4354 : 10 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 4355 [ - + ]: 10 : if (!ActivateBestChain(state, chainparams, nullptr)) {
# 4356 : 0 : break;
# 4357 : 0 : }
# 4358 : 1690 : }
# 4359 : :
# 4360 : 1690 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 4361 : 1690 : NotifyHeaderTip(*this);
# 4362 : :
# 4363 : : // Recursively process earlier encountered successors of this block
# 4364 : 1690 : std::deque<uint256> queue;
# 4365 : 1690 : queue.push_back(hash);
# 4366 [ + + ]: 3388 : while (!queue.empty()) {
# 4367 : 1698 : uint256 head = queue.front();
# 4368 : 1698 : queue.pop_front();
# 4369 : 1698 : std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
# 4370 [ + + ]: 1707 : while (range.first != range.second) {
# 4371 : 9 : std::multimap<uint256, FlatFilePos>::iterator it = range.first;
# 4372 : 9 : std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
# 4373 [ + - ]: 9 : if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
# 4374 : 9 : {
# 4375 [ + - ]: 9 : LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
# 4376 : 9 : head.ToString());
# 4377 : 9 : LOCK(cs_main);
# 4378 : 9 : BlockValidationState dummy;
# 4379 : 9 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 4380 [ + + ]: 9 : if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
# 4381 : 8 : {
# 4382 : 8 : nLoaded++;
# 4383 : 8 : queue.push_back(pblockrecursive->GetHash());
# 4384 : 8 : }
# 4385 : 9 : }
# 4386 : 9 : range.first++;
# 4387 : 9 : mapBlocksUnknownParent.erase(it);
# 4388 : 9 : assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
# 4389 : 9 : NotifyHeaderTip(*this);
# 4390 : 9 : }
# 4391 : 1698 : }
# 4392 : 1690 : } catch (const std::exception& e) {
# 4393 : 0 : LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
# 4394 : 0 : }
# 4395 : 1699 : }
# 4396 : 10 : } catch (const std::runtime_error& e) {
# 4397 : 0 : AbortNode(std::string("System error: ") + e.what());
# 4398 : 0 : }
# 4399 : 10 : LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
# 4400 : 10 : }
# 4401 : :
# 4402 : : void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
# 4403 : 257292 : {
# 4404 [ + + ]: 257292 : if (!fCheckBlockIndex) {
# 4405 : 596 : return;
# 4406 : 596 : }
# 4407 : :
# 4408 : 256696 : LOCK(cs_main);
# 4409 : :
# 4410 : : // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
# 4411 : : // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
# 4412 : : // tests when iterating the block tree require that m_chain has been initialized.)
# 4413 [ + + ]: 256696 : if (m_chain.Height() < 0) {
# 4414 : 18 : assert(m_blockman.m_block_index.size() <= 1);
# 4415 : 18 : return;
# 4416 : 18 : }
# 4417 : :
# 4418 : : // Build forward-pointing map of the entire block tree.
# 4419 : 256678 : std::multimap<CBlockIndex*,CBlockIndex*> forward;
# 4420 [ + + ]: 146485621 : for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
# 4421 : 146485621 : forward.insert(std::make_pair(entry.second->pprev, entry.second));
# 4422 : 146485621 : }
# 4423 : :
# 4424 : 256678 : assert(forward.size() == m_blockman.m_block_index.size());
# 4425 : :
# 4426 : 256678 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
# 4427 : 256678 : CBlockIndex *pindex = rangeGenesis.first->second;
# 4428 : 256678 : rangeGenesis.first++;
# 4429 : 256678 : assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
# 4430 : :
# 4431 : : // Iterate over the entire block tree, using depth-first search.
# 4432 : : // Along the way, remember whether there are blocks on the path from genesis
# 4433 : : // block being explored which are the first to have certain properties.
# 4434 : 256678 : size_t nNodes = 0;
# 4435 : 256678 : int nHeight = 0;
# 4436 : 256678 : CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
# 4437 : 256678 : CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
# 4438 : 256678 : CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
# 4439 : 256678 : CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
# 4440 : 256678 : CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
# 4441 : 256678 : CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
# 4442 : 256678 : CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
# 4443 [ + + ]: 146742299 : while (pindex != nullptr) {
# 4444 : 146485621 : nNodes++;
# 4445 [ + + ][ + + ]: 146485621 : if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
# 4446 [ + + ][ + + ]: 146485621 : if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
# 4447 [ + + ][ + + ]: 146485621 : if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
# 4448 [ + + ][ + - ]: 146485621 : if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
# [ - + ]
# 4449 [ + + ][ + + ]: 146485621 : if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
# [ + + ]
# 4450 [ + + ][ + + ]: 146485621 : if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
# [ + + ]
# 4451 [ + + ][ + + ]: 146485621 : if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
# [ + + ]
# 4452 : :
# 4453 : : // Begin: actual consistency checks.
# 4454 [ + + ]: 146485621 : if (pindex->pprev == nullptr) {
# 4455 : : // Genesis block checks.
# 4456 : 256678 : assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
# 4457 : 256678 : assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
# 4458 : 256678 : }
# 4459 [ + + ]: 146485621 : if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
# 4460 : : // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
# 4461 : : // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
# 4462 [ + + ]: 146485621 : if (!fHavePruned) {
# 4463 : : // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
# 4464 : 142881121 : assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
# 4465 : 142881121 : assert(pindexFirstMissing == pindexFirstNeverProcessed);
# 4466 : 142881121 : } else {
# 4467 : : // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
# 4468 [ + + ]: 3604500 : if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
# 4469 : 3604500 : }
# 4470 [ + + ]: 146485621 : if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
# 4471 : 146485621 : assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
# 4472 : : // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
# 4473 : 146485621 : assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
# 4474 : 146485621 : assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
# 4475 : 146485621 : assert(pindex->nHeight == nHeight); // nHeight must be consistent.
# 4476 : 146485621 : assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
# 4477 : 146485621 : assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
# 4478 : 146485621 : assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
# 4479 [ + - ]: 146485621 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
# 4480 [ + + ]: 146485621 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
# 4481 [ + + ]: 146485621 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
# 4482 [ + + ]: 146485621 : if (pindexFirstInvalid == nullptr) {
# 4483 : : // Checks for not-invalid blocks.
# 4484 : 134457931 : assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
# 4485 : 134457931 : }
# 4486 [ + + ][ + + ]: 146485621 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
# [ + + ]
# 4487 [ + + ]: 705944 : if (pindexFirstInvalid == nullptr) {
# 4488 : : // If this block sorts at least as good as the current tip and
# 4489 : : // is valid and we have all data for its parents, it must be in
# 4490 : : // setBlockIndexCandidates. m_chain.Tip() must also be there
# 4491 : : // even if some data has been pruned.
# 4492 [ + + ][ + + ]: 650039 : if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
# 4493 : 649039 : assert(setBlockIndexCandidates.count(pindex));
# 4494 : 649039 : }
# 4495 : : // If some parent is missing, then it could be that this block was in
# 4496 : : // setBlockIndexCandidates but had to be removed because of the missing data.
# 4497 : : // In this case it must be in m_blocks_unlinked -- see test below.
# 4498 : 650039 : }
# 4499 : 145779677 : } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
# 4500 : 145779677 : assert(setBlockIndexCandidates.count(pindex) == 0);
# 4501 : 145779677 : }
# 4502 : : // Check whether this block is in m_blocks_unlinked.
# 4503 : 146485621 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
# 4504 : 146485621 : bool foundInUnlinked = false;
# 4505 [ + + ]: 146488137 : while (rangeUnlinked.first != rangeUnlinked.second) {
# 4506 : 137318 : assert(rangeUnlinked.first->first == pindex->pprev);
# 4507 [ + + ]: 137318 : if (rangeUnlinked.first->second == pindex) {
# 4508 : 134802 : foundInUnlinked = true;
# 4509 : 134802 : break;
# 4510 : 134802 : }
# 4511 : 2516 : rangeUnlinked.first++;
# 4512 : 2516 : }
# 4513 [ + + ][ + + ]: 146485621 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
# [ + + ][ + - ]
# 4514 : : // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
# 4515 : 134802 : assert(foundInUnlinked);
# 4516 : 134802 : }
# 4517 [ + + ]: 146485621 : if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
# 4518 [ + + ]: 146485621 : if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
# 4519 [ + + ][ + + ]: 146485621 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
# [ + + ][ + + ]
# 4520 : : // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
# 4521 : 2859500 : assert(fHavePruned); // We must have pruned.
# 4522 : : // This block may have entered m_blocks_unlinked if:
# 4523 : : // - it has a descendant that at some point had more work than the
# 4524 : : // tip, and
# 4525 : : // - we tried switching to that descendant but were missing
# 4526 : : // data for some intermediate block between m_chain and the
# 4527 : : // tip.
# 4528 : : // So if this block is itself better than m_chain.Tip() and it wasn't in
# 4529 : : // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
# 4530 [ + + ][ - + ]: 2859500 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
# [ - + ]
# 4531 [ # # ]: 0 : if (pindexFirstInvalid == nullptr) {
# 4532 : 0 : assert(foundInUnlinked);
# 4533 : 0 : }
# 4534 : 0 : }
# 4535 : 2859500 : }
# 4536 : : // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
# 4537 : : // End: actual consistency checks.
# 4538 : :
# 4539 : : // Try descending into the first subnode.
# 4540 : 146485621 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
# 4541 [ + + ]: 146485621 : if (range.first != range.second) {
# 4542 : : // A subnode was found.
# 4543 : 127995234 : pindex = range.first->second;
# 4544 : 127995234 : nHeight++;
# 4545 : 127995234 : continue;
# 4546 : 127995234 : }
# 4547 : : // This is a leaf node.
# 4548 : : // Move upwards until we reach a node of which we have not yet visited the last child.
# 4549 [ + + ]: 146742299 : while (pindex) {
# 4550 : : // We are going to either move to a parent or a sibling of pindex.
# 4551 : : // If pindex was the first with a certain property, unset the corresponding variable.
# 4552 [ + + ]: 146485621 : if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
# 4553 [ + + ]: 146485621 : if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
# 4554 [ + + ]: 146485621 : if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
# 4555 [ - + ]: 146485621 : if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
# 4556 [ + + ]: 146485621 : if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
# 4557 [ + + ]: 146485621 : if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
# 4558 [ + + ]: 146485621 : if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
# 4559 : : // Find our parent.
# 4560 : 146485621 : CBlockIndex* pindexPar = pindex->pprev;
# 4561 : : // Find which child we just visited.
# 4562 : 146485621 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
# 4563 [ + + ]: 171767668 : while (rangePar.first->second != pindex) {
# 4564 : 25282047 : assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
# 4565 : 25282047 : rangePar.first++;
# 4566 : 25282047 : }
# 4567 : : // Proceed to the next one.
# 4568 : 146485621 : rangePar.first++;
# 4569 [ + + ]: 146485621 : if (rangePar.first != rangePar.second) {
# 4570 : : // Move to the sibling.
# 4571 : 18233709 : pindex = rangePar.first->second;
# 4572 : 18233709 : break;
# 4573 : 128251912 : } else {
# 4574 : : // Move up further.
# 4575 : 128251912 : pindex = pindexPar;
# 4576 : 128251912 : nHeight--;
# 4577 : 128251912 : continue;
# 4578 : 128251912 : }
# 4579 : 146485621 : }
# 4580 : 18490387 : }
# 4581 : :
# 4582 : : // Check that we actually traversed the entire map.
# 4583 : 256678 : assert(nNodes == forward.size());
# 4584 : 256678 : }
# 4585 : :
# 4586 : : std::string CChainState::ToString()
# 4587 : 830 : {
# 4588 : 830 : CBlockIndex* tip = m_chain.Tip();
# 4589 : 830 : return strprintf("Chainstate [%s] @ height %d (%s)",
# 4590 [ + + ]: 830 : m_from_snapshot_blockhash ? "snapshot" : "ibd",
# 4591 [ + + ][ + + ]: 830 : tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
# 4592 : 830 : }
# 4593 : :
# 4594 : : bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
# 4595 : 20 : {
# 4596 [ - + ]: 20 : if (coinstip_size == m_coinstip_cache_size_bytes &&
# 4597 [ # # ]: 20 : coinsdb_size == m_coinsdb_cache_size_bytes) {
# 4598 : : // Cache sizes are unchanged, no need to continue.
# 4599 : 0 : return true;
# 4600 : 0 : }
# 4601 : 20 : size_t old_coinstip_size = m_coinstip_cache_size_bytes;
# 4602 : 20 : m_coinstip_cache_size_bytes = coinstip_size;
# 4603 : 20 : m_coinsdb_cache_size_bytes = coinsdb_size;
# 4604 : 20 : CoinsDB().ResizeCache(coinsdb_size);
# 4605 : :
# 4606 : 20 : LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
# 4607 : 20 : this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
# 4608 : 20 : LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
# 4609 : 20 : this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
# 4610 : :
# 4611 : 20 : BlockValidationState state;
# 4612 : 20 : const CChainParams& chainparams = Params();
# 4613 : :
# 4614 : 20 : bool ret;
# 4615 : :
# 4616 [ + + ]: 20 : if (coinstip_size > old_coinstip_size) {
# 4617 : : // Likely no need to flush if cache sizes have grown.
# 4618 : 7 : ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
# 4619 : 13 : } else {
# 4620 : : // Otherwise, flush state to disk and deallocate the in-memory coins map.
# 4621 : 13 : ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
# 4622 : 13 : CoinsTip().ReallocateCache();
# 4623 : 13 : }
# 4624 : 20 : return ret;
# 4625 : 20 : }
# 4626 : :
# 4627 : : static const uint64_t MEMPOOL_DUMP_VERSION = 1;
# 4628 : :
# 4629 : : bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
# 4630 : 615 : {
# 4631 : 615 : const CChainParams& chainparams = Params();
# 4632 : 615 : int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
# 4633 : 615 : FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat", "rb")};
# 4634 : 615 : CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
# 4635 [ + + ]: 615 : if (file.IsNull()) {
# 4636 : 370 : LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
# 4637 : 370 : return false;
# 4638 : 370 : }
# 4639 : :
# 4640 : 245 : int64_t count = 0;
# 4641 : 245 : int64_t expired = 0;
# 4642 : 245 : int64_t failed = 0;
# 4643 : 245 : int64_t already_there = 0;
# 4644 : 245 : int64_t unbroadcast = 0;
# 4645 : 245 : int64_t nNow = GetTime();
# 4646 : :
# 4647 : 245 : try {
# 4648 : 245 : uint64_t version;
# 4649 : 245 : file >> version;
# 4650 [ - + ]: 245 : if (version != MEMPOOL_DUMP_VERSION) {
# 4651 : 0 : return false;
# 4652 : 0 : }
# 4653 : 245 : uint64_t num;
# 4654 : 245 : file >> num;
# 4655 [ + + ]: 378 : while (num--) {
# 4656 : 133 : CTransactionRef tx;
# 4657 : 133 : int64_t nTime;
# 4658 : 133 : int64_t nFeeDelta;
# 4659 : 133 : file >> tx;
# 4660 : 133 : file >> nTime;
# 4661 : 133 : file >> nFeeDelta;
# 4662 : :
# 4663 : 133 : CAmount amountdelta = nFeeDelta;
# 4664 [ + + ]: 133 : if (amountdelta) {
# 4665 : 4 : pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
# 4666 : 4 : }
# 4667 [ + - ]: 133 : if (nTime > nNow - nExpiryTimeout) {
# 4668 : 133 : LOCK(cs_main);
# 4669 : 133 : assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
# 4670 [ + + ]: 133 : if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */,
# 4671 : 133 : false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) {
# 4672 : 102 : ++count;
# 4673 : 102 : } else {
# 4674 : : // mempool may contain the transaction already, e.g. from
# 4675 : : // wallet(s) having loaded it while we were processing
# 4676 : : // mempool transactions; consider these as valid, instead of
# 4677 : : // failed, but mark them as 'already there'
# 4678 [ + + ]: 31 : if (pool.exists(tx->GetHash())) {
# 4679 : 5 : ++already_there;
# 4680 : 26 : } else {
# 4681 : 26 : ++failed;
# 4682 : 26 : }
# 4683 : 31 : }
# 4684 : 133 : } else {
# 4685 : 0 : ++expired;
# 4686 : 0 : }
# 4687 [ - + ]: 133 : if (ShutdownRequested())
# 4688 : 0 : return false;
# 4689 : 133 : }
# 4690 : 245 : std::map<uint256, CAmount> mapDeltas;
# 4691 : 245 : file >> mapDeltas;
# 4692 : :
# 4693 [ - + ]: 245 : for (const auto& i : mapDeltas) {
# 4694 : 0 : pool.PrioritiseTransaction(i.first, i.second);
# 4695 : 0 : }
# 4696 : :
# 4697 : 245 : std::set<uint256> unbroadcast_txids;
# 4698 : 245 : file >> unbroadcast_txids;
# 4699 : 245 : unbroadcast = unbroadcast_txids.size();
# 4700 [ + + ]: 245 : for (const auto& txid : unbroadcast_txids) {
# 4701 : : // Ensure transactions were accepted to mempool then add to
# 4702 : : // unbroadcast set.
# 4703 [ + + ]: 58 : if (pool.get(txid) != nullptr) pool.AddUnbroadcastTx(txid);
# 4704 : 58 : }
# 4705 : 245 : } catch (const std::exception& e) {
# 4706 : 0 : LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
# 4707 : 0 : return false;
# 4708 : 0 : }
# 4709 : :
# 4710 : 245 : LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
# 4711 : 245 : return true;
# 4712 : 245 : }
# 4713 : :
# 4714 : : bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function, bool skip_file_commit)
# 4715 : 617 : {
# 4716 : 617 : int64_t start = GetTimeMicros();
# 4717 : :
# 4718 : 617 : std::map<uint256, CAmount> mapDeltas;
# 4719 : 617 : std::vector<TxMempoolInfo> vinfo;
# 4720 : 617 : std::set<uint256> unbroadcast_txids;
# 4721 : :
# 4722 : 617 : static Mutex dump_mutex;
# 4723 : 617 : LOCK(dump_mutex);
# 4724 : :
# 4725 : 617 : {
# 4726 : 617 : LOCK(pool.cs);
# 4727 [ + + ]: 617 : for (const auto &i : pool.mapDeltas) {
# 4728 : 10 : mapDeltas[i.first] = i.second;
# 4729 : 10 : }
# 4730 : 617 : vinfo = pool.infoAll();
# 4731 : 617 : unbroadcast_txids = pool.GetUnbroadcastTxs();
# 4732 : 617 : }
# 4733 : :
# 4734 : 617 : int64_t mid = GetTimeMicros();
# 4735 : :
# 4736 : 617 : try {
# 4737 : 617 : FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat.new", "wb")};
# 4738 [ + + ]: 617 : if (!filestr) {
# 4739 : 1 : return false;
# 4740 : 1 : }
# 4741 : :
# 4742 : 616 : CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
# 4743 : :
# 4744 : 616 : uint64_t version = MEMPOOL_DUMP_VERSION;
# 4745 : 616 : file << version;
# 4746 : :
# 4747 : 616 : file << (uint64_t)vinfo.size();
# 4748 [ + + ]: 748 : for (const auto& i : vinfo) {
# 4749 : 748 : file << *(i.tx);
# 4750 : 748 : file << int64_t{count_seconds(i.m_time)};
# 4751 : 748 : file << int64_t{i.nFeeDelta};
# 4752 : 748 : mapDeltas.erase(i.tx->GetHash());
# 4753 : 748 : }
# 4754 : :
# 4755 : 616 : file << mapDeltas;
# 4756 : :
# 4757 : 616 : LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
# 4758 : 616 : file << unbroadcast_txids;
# 4759 : :
# 4760 [ + - ][ - + ]: 616 : if (!skip_file_commit && !FileCommit(file.Get()))
# 4761 : 0 : throw std::runtime_error("FileCommit failed");
# 4762 : 616 : file.fclose();
# 4763 [ - + ]: 616 : if (!RenameOver(gArgs.GetDataDirNet() / "mempool.dat.new", gArgs.GetDataDirNet() / "mempool.dat")) {
# 4764 : 0 : throw std::runtime_error("Rename failed");
# 4765 : 0 : }
# 4766 : 616 : int64_t last = GetTimeMicros();
# 4767 : 616 : LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
# 4768 : 616 : } catch (const std::exception& e) {
# 4769 : 0 : LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
# 4770 : 0 : return false;
# 4771 : 0 : }
# 4772 : 616 : return true;
# 4773 : 616 : }
# 4774 : :
# 4775 : : //! Guess how far we are in the verification process at the given block index
# 4776 : : //! require cs_main if pindex has not been validated yet (because nChainTx might be unset)
# 4777 : 153051 : double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
# 4778 [ + + ]: 153051 : if (pindex == nullptr)
# 4779 : 1 : return 0.0;
# 4780 : :
# 4781 : 153050 : int64_t nNow = time(nullptr);
# 4782 : :
# 4783 : 153050 : double fTxTotal;
# 4784 : :
# 4785 [ + + ]: 153050 : if (pindex->nChainTx <= data.nTxCount) {
# 4786 : 336 : fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
# 4787 : 152714 : } else {
# 4788 : 152714 : fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
# 4789 : 152714 : }
# 4790 : :
# 4791 : 153050 : return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
# 4792 : 153050 : }
# 4793 : :
# 4794 : : std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
# 4795 : 14 : {
# 4796 : 14 : LOCK(::cs_main);
# 4797 [ + + ][ + + ]: 14 : if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
# 4798 : : // If a snapshot chainstate exists, it will always be our active.
# 4799 : 4 : return m_active_chainstate->m_from_snapshot_blockhash;
# 4800 : 4 : }
# 4801 : 10 : return std::nullopt;
# 4802 : 10 : }
# 4803 : :
# 4804 : : std::vector<CChainState*> ChainstateManager::GetAll()
# 4805 : 4554 : {
# 4806 : 4554 : LOCK(::cs_main);
# 4807 : 4554 : std::vector<CChainState*> out;
# 4808 : :
# 4809 [ + - ][ + + ]: 4554 : if (!IsSnapshotValidated() && m_ibd_chainstate) {
# 4810 : 4536 : out.push_back(m_ibd_chainstate.get());
# 4811 : 4536 : }
# 4812 : :
# 4813 [ + + ]: 4554 : if (m_snapshot_chainstate) {
# 4814 : 7 : out.push_back(m_snapshot_chainstate.get());
# 4815 : 7 : }
# 4816 : :
# 4817 : 4554 : return out;
# 4818 : 4554 : }
# 4819 : :
# 4820 : : CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const std::optional<uint256>& snapshot_blockhash)
# 4821 : 786 : {
# 4822 : 786 : bool is_snapshot = snapshot_blockhash.has_value();
# 4823 : 786 : std::unique_ptr<CChainState>& to_modify =
# 4824 [ + + ]: 786 : is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
# 4825 : :
# 4826 [ - + ]: 786 : if (to_modify) {
# 4827 : 0 : throw std::logic_error("should not be overwriting a chainstate");
# 4828 : 0 : }
# 4829 : 786 : to_modify.reset(new CChainState(mempool, m_blockman, snapshot_blockhash));
# 4830 : :
# 4831 : : // Snapshot chainstates and initial IBD chaintates always become active.
# 4832 [ + + ][ + - ]: 786 : if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
# [ + - ]
# 4833 : 786 : LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
# 4834 : 786 : m_active_chainstate = to_modify.get();
# 4835 : 786 : } else {
# 4836 : 0 : throw std::logic_error("unexpected chainstate activation");
# 4837 : 0 : }
# 4838 : :
# 4839 : 786 : return *to_modify;
# 4840 : 786 : }
# 4841 : :
# 4842 : : const AssumeutxoData* ExpectedAssumeutxo(
# 4843 : : const int height, const CChainParams& chainparams)
# 4844 : 22 : {
# 4845 : 22 : const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
# 4846 : 22 : const auto assumeutxo_found = valid_assumeutxos_map.find(height);
# 4847 : :
# 4848 [ + + ]: 22 : if (assumeutxo_found != valid_assumeutxos_map.end()) {
# 4849 : 9 : return &assumeutxo_found->second;
# 4850 : 9 : }
# 4851 : 13 : return nullptr;
# 4852 : 13 : }
# 4853 : :
# 4854 : : bool ChainstateManager::ActivateSnapshot(
# 4855 : : CAutoFile& coins_file,
# 4856 : : const SnapshotMetadata& metadata,
# 4857 : : bool in_memory)
# 4858 : 8 : {
# 4859 : 8 : uint256 base_blockhash = metadata.m_base_blockhash;
# 4860 : :
# 4861 [ + + ]: 8 : if (this->SnapshotBlockhash()) {
# 4862 : 1 : LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
# 4863 : 1 : return false;
# 4864 : 1 : }
# 4865 : :
# 4866 : 7 : int64_t current_coinsdb_cache_size{0};
# 4867 : 7 : int64_t current_coinstip_cache_size{0};
# 4868 : :
# 4869 : : // Cache percentages to allocate to each chainstate.
# 4870 : : //
# 4871 : : // These particular percentages don't matter so much since they will only be
# 4872 : : // relevant during snapshot activation; caches are rebalanced at the conclusion of
# 4873 : : // this function. We want to give (essentially) all available cache capacity to the
# 4874 : : // snapshot to aid the bulk load later in this function.
# 4875 : 7 : static constexpr double IBD_CACHE_PERC = 0.01;
# 4876 : 7 : static constexpr double SNAPSHOT_CACHE_PERC = 0.99;
# 4877 : :
# 4878 : 7 : {
# 4879 : 7 : LOCK(::cs_main);
# 4880 : : // Resize the coins caches to ensure we're not exceeding memory limits.
# 4881 : : //
# 4882 : : // Allocate the majority of the cache to the incoming snapshot chainstate, since
# 4883 : : // (optimistically) getting to its tip will be the top priority. We'll need to call
# 4884 : : // `MaybeRebalanceCaches()` once we're done with this function to ensure
# 4885 : : // the right allocation (including the possibility that no snapshot was activated
# 4886 : : // and that we should restore the active chainstate caches to their original size).
# 4887 : : //
# 4888 : 7 : current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
# 4889 : 7 : current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;
# 4890 : :
# 4891 : : // Temporarily resize the active coins cache to make room for the newly-created
# 4892 : : // snapshot chain.
# 4893 : 7 : this->ActiveChainstate().ResizeCoinsCaches(
# 4894 : 7 : static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
# 4895 : 7 : static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
# 4896 : 7 : }
# 4897 : :
# 4898 : 7 : auto snapshot_chainstate = WITH_LOCK(::cs_main, return std::make_unique<CChainState>(
# 4899 : 7 : this->ActiveChainstate().m_mempool, m_blockman, base_blockhash));
# 4900 : :
# 4901 : 7 : {
# 4902 : 7 : LOCK(::cs_main);
# 4903 : 7 : snapshot_chainstate->InitCoinsDB(
# 4904 : 7 : static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
# 4905 : 7 : in_memory, false, "chainstate");
# 4906 : 7 : snapshot_chainstate->InitCoinsCache(
# 4907 : 7 : static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
# 4908 : 7 : }
# 4909 : :
# 4910 : 7 : const bool snapshot_ok = this->PopulateAndValidateSnapshot(
# 4911 : 7 : *snapshot_chainstate, coins_file, metadata);
# 4912 : :
# 4913 [ + + ]: 7 : if (!snapshot_ok) {
# 4914 : 6 : WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
# 4915 : 6 : return false;
# 4916 : 6 : }
# 4917 : :
# 4918 : 1 : {
# 4919 : 1 : LOCK(::cs_main);
# 4920 : 1 : assert(!m_snapshot_chainstate);
# 4921 : 1 : m_snapshot_chainstate.swap(snapshot_chainstate);
# 4922 : 1 : const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(::Params());
# 4923 : 1 : assert(chaintip_loaded);
# 4924 : :
# 4925 : 1 : m_active_chainstate = m_snapshot_chainstate.get();
# 4926 : :
# 4927 : 1 : LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
# 4928 : 1 : LogPrintf("[snapshot] (%.2f MB)\n",
# 4929 : 1 : m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000));
# 4930 : :
# 4931 : 1 : this->MaybeRebalanceCaches();
# 4932 : 1 : }
# 4933 : 1 : return true;
# 4934 : 1 : }
# 4935 : :
# 4936 : : bool ChainstateManager::PopulateAndValidateSnapshot(
# 4937 : : CChainState& snapshot_chainstate,
# 4938 : : CAutoFile& coins_file,
# 4939 : : const SnapshotMetadata& metadata)
# 4940 : 7 : {
# 4941 : : // It's okay to release cs_main before we're done using `coins_cache` because we know
# 4942 : : // that nothing else will be referencing the newly created snapshot_chainstate yet.
# 4943 : 7 : CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());
# 4944 : :
# 4945 : 7 : uint256 base_blockhash = metadata.m_base_blockhash;
# 4946 : :
# 4947 : 7 : CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));
# 4948 : :
# 4949 [ + + ]: 7 : if (!snapshot_start_block) {
# 4950 : : // Needed for GetUTXOStats and ExpectedAssumeutxo to determine the height and to avoid a crash when base_blockhash.IsNull()
# 4951 : 2 : LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
# 4952 : 2 : base_blockhash.ToString());
# 4953 : 2 : return false;
# 4954 : 2 : }
# 4955 : :
# 4956 : 5 : int base_height = snapshot_start_block->nHeight;
# 4957 : 5 : auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params());
# 4958 : :
# 4959 [ + + ]: 5 : if (!maybe_au_data) {
# 4960 : 1 : LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */
# 4961 : 1 : "(%d) - refusing to load snapshot\n", base_height);
# 4962 : 1 : return false;
# 4963 : 1 : }
# 4964 : :
# 4965 : 4 : const AssumeutxoData& au_data = *maybe_au_data;
# 4966 : :
# 4967 : 4 : COutPoint outpoint;
# 4968 : 4 : Coin coin;
# 4969 : 4 : const uint64_t coins_count = metadata.m_coins_count;
# 4970 : 4 : uint64_t coins_left = metadata.m_coins_count;
# 4971 : :
# 4972 : 4 : LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString());
# 4973 : 4 : int64_t flush_now{0};
# 4974 : 4 : int64_t coins_processed{0};
# 4975 : :
# 4976 [ + + ]: 442 : while (coins_left > 0) {
# 4977 : 439 : try {
# 4978 : 439 : coins_file >> outpoint;
# 4979 : 439 : coins_file >> coin;
# 4980 : 439 : } catch (const std::ios_base::failure&) {
# 4981 : 1 : LogPrintf("[snapshot] bad snapshot format or truncated snapshot after deserializing %d coins\n",
# 4982 : 1 : coins_count - coins_left);
# 4983 : 1 : return false;
# 4984 : 1 : }
# 4985 : 438 : coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin));
# 4986 : :
# 4987 : 438 : --coins_left;
# 4988 : 438 : ++coins_processed;
# 4989 : :
# 4990 [ - + ]: 438 : if (coins_processed % 1000000 == 0) {
# 4991 : 0 : LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
# 4992 : 0 : coins_processed,
# 4993 : 0 : static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count),
# 4994 : 0 : coins_cache.DynamicMemoryUsage() / (1000 * 1000));
# 4995 : 0 : }
# 4996 : :
# 4997 : : // Batch write and flush (if we need to) every so often.
# 4998 : : //
# 4999 : : // If our average Coin size is roughly 41 bytes, checking every 120,000 coins
# 5000 : : // means <5MB of memory imprecision.
# 5001 [ - + ]: 438 : if (coins_processed % 120000 == 0) {
# 5002 [ # # ]: 0 : if (ShutdownRequested()) {
# 5003 : 0 : return false;
# 5004 : 0 : }
# 5005 : :
# 5006 : 0 : const auto snapshot_cache_state = WITH_LOCK(::cs_main,
# 5007 : 0 : return snapshot_chainstate.GetCoinsCacheSizeState(&snapshot_chainstate.m_mempool));
# 5008 : :
# 5009 [ # # ]: 0 : if (snapshot_cache_state >=
# 5010 : 0 : CoinsCacheSizeState::CRITICAL) {
# 5011 : 0 : LogPrintf("[snapshot] flushing coins cache (%.2f MB)... ", /* Continued */
# 5012 : 0 : coins_cache.DynamicMemoryUsage() / (1000 * 1000));
# 5013 : 0 : flush_now = GetTimeMillis();
# 5014 : :
# 5015 : : // This is a hack - we don't know what the actual best block is, but that
# 5016 : : // doesn't matter for the purposes of flushing the cache here. We'll set this
# 5017 : : // to its correct value (`base_blockhash`) below after the coins are loaded.
# 5018 : 0 : coins_cache.SetBestBlock(GetRandHash());
# 5019 : :
# 5020 : 0 : coins_cache.Flush();
# 5021 : 0 : LogPrintf("done (%.2fms)\n", GetTimeMillis() - flush_now);
# 5022 : 0 : }
# 5023 : 0 : }
# 5024 : 438 : }
# 5025 : :
# 5026 : : // Important that we set this. This and the coins_cache accesses above are
# 5027 : : // sort of a layer violation, but either we reach into the innards of
# 5028 : : // CCoinsViewCache here or we have to invert some of the CChainState to
# 5029 : : // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
# 5030 : : // method.
# 5031 : 4 : coins_cache.SetBestBlock(base_blockhash);
# 5032 : :
# 5033 : 3 : bool out_of_coins{false};
# 5034 : 3 : try {
# 5035 : 3 : coins_file >> outpoint;
# 5036 : 3 : } catch (const std::ios_base::failure&) {
# 5037 : : // We expect an exception since we should be out of coins.
# 5038 : 2 : out_of_coins = true;
# 5039 : 2 : }
# 5040 [ + + ]: 3 : if (!out_of_coins) {
# 5041 : 1 : LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n",
# 5042 : 1 : coins_count);
# 5043 : 1 : return false;
# 5044 : 1 : }
# 5045 : :
# 5046 : 2 : LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
# 5047 : 2 : coins_count,
# 5048 : 2 : coins_cache.DynamicMemoryUsage() / (1000 * 1000),
# 5049 : 2 : base_blockhash.ToString());
# 5050 : :
# 5051 : 2 : LogPrintf("[snapshot] flushing snapshot chainstate to disk\n");
# 5052 : : // No need to acquire cs_main since this chainstate isn't being used yet.
# 5053 : 2 : coins_cache.Flush(); // TODO: if #17487 is merged, add erase=false here for better performance.
# 5054 : :
# 5055 : 2 : assert(coins_cache.GetBestBlock() == base_blockhash);
# 5056 : :
# 5057 : 2 : CCoinsStats stats{CoinStatsHashType::HASH_SERIALIZED};
# 5058 : 219 : auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ };
# 5059 : :
# 5060 : : // As above, okay to immediately release cs_main here since no other context knows
# 5061 : : // about the snapshot_chainstate.
# 5062 : 2 : CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());
# 5063 : :
# 5064 [ - + ]: 2 : if (!GetUTXOStats(snapshot_coinsdb, WITH_LOCK(::cs_main, return std::ref(m_blockman)), stats, breakpoint_fnc)) {
# 5065 : 0 : LogPrintf("[snapshot] failed to generate coins stats\n");
# 5066 : 0 : return false;
# 5067 : 0 : }
# 5068 : :
# 5069 : : // Assert that the deserialized chainstate contents match the expected assumeutxo value.
# 5070 [ + + ]: 2 : if (AssumeutxoHash{stats.hashSerialized} != au_data.hash_serialized) {
# 5071 : 1 : LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
# 5072 : 1 : au_data.hash_serialized.ToString(), stats.hashSerialized.ToString());
# 5073 : 1 : return false;
# 5074 : 1 : }
# 5075 : :
# 5076 : 1 : snapshot_chainstate.m_chain.SetTip(snapshot_start_block);
# 5077 : :
# 5078 : : // The remainder of this function requires modifying data protected by cs_main.
# 5079 : 1 : LOCK(::cs_main);
# 5080 : :
# 5081 : : // Fake various pieces of CBlockIndex state:
# 5082 : : //
# 5083 : : // - nChainTx: so that we accurately report IBD-to-tip progress
# 5084 : : // - nTx: so that LoadBlockIndex() loads assumed-valid CBlockIndex entries
# 5085 : : // (among other things)
# 5086 : : // - nStatus & BLOCK_OPT_WITNESS: so that RewindBlockIndex() doesn't zealously
# 5087 : : // unwind the assumed-valid chain.
# 5088 : : //
# 5089 : 1 : CBlockIndex* index = nullptr;
# 5090 [ + + ]: 112 : for (int i = 0; i <= snapshot_chainstate.m_chain.Height(); ++i) {
# 5091 : 111 : index = snapshot_chainstate.m_chain[i];
# 5092 : :
# 5093 [ - + ]: 111 : if (!index->nTx) {
# 5094 : 0 : index->nTx = 1;
# 5095 : 0 : }
# 5096 [ + + ]: 111 : index->nChainTx = index->pprev ? index->pprev->nChainTx + index->nTx : 1;
# 5097 : :
# 5098 : : // We need to fake this flag so that CChainState::RewindBlockIndex()
# 5099 : : // won't try to rewind the entire assumed-valid chain on startup.
# 5100 [ + + ][ + - ]: 111 : if (index->pprev && ::IsWitnessEnabled(index->pprev, ::Params().GetConsensus())) {
# 5101 : 110 : index->nStatus |= BLOCK_OPT_WITNESS;
# 5102 : 110 : }
# 5103 : 111 : }
# 5104 : :
# 5105 : 1 : assert(index);
# 5106 : 1 : index->nChainTx = au_data.nChainTx;
# 5107 : 1 : snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
# 5108 : :
# 5109 : 1 : LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
# 5110 : 1 : coins_cache.DynamicMemoryUsage() / (1000 * 1000));
# 5111 : 1 : return true;
# 5112 : 1 : }
# 5113 : :
# 5114 : : CChainState& ChainstateManager::ActiveChainstate() const
# 5115 : 7420924 : {
# 5116 : 7420924 : LOCK(::cs_main);
# 5117 : 7420924 : assert(m_active_chainstate);
# 5118 : 7420924 : return *m_active_chainstate;
# 5119 : 7420924 : }
# 5120 : :
# 5121 : : bool ChainstateManager::IsSnapshotActive() const
# 5122 : 2 : {
# 5123 : 2 : LOCK(::cs_main);
# 5124 [ + + ][ + - ]: 2 : return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
# 5125 : 2 : }
# 5126 : :
# 5127 : : CChainState& ChainstateManager::ValidatedChainstate() const
# 5128 : 4 : {
# 5129 : 4 : LOCK(::cs_main);
# 5130 [ + + ][ - + ]: 4 : if (m_snapshot_chainstate && IsSnapshotValidated()) {
# 5131 : 0 : return *m_snapshot_chainstate.get();
# 5132 : 0 : }
# 5133 : 4 : assert(m_ibd_chainstate);
# 5134 : 4 : return *m_ibd_chainstate.get();
# 5135 : 4 : }
# 5136 : :
# 5137 : : bool ChainstateManager::IsBackgroundIBD(CChainState* chainstate) const
# 5138 : 5 : {
# 5139 : 5 : LOCK(::cs_main);
# 5140 [ + + ][ + + ]: 5 : return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get());
# 5141 : 5 : }
# 5142 : :
# 5143 : : void ChainstateManager::Unload()
# 5144 : 785 : {
# 5145 [ + + ]: 789 : for (CChainState* chainstate : this->GetAll()) {
# 5146 : 789 : chainstate->m_chain.SetTip(nullptr);
# 5147 : 789 : chainstate->UnloadBlockIndex();
# 5148 : 789 : }
# 5149 : :
# 5150 : 785 : m_blockman.Unload();
# 5151 : 785 : }
# 5152 : :
# 5153 : : void ChainstateManager::Reset()
# 5154 : 157 : {
# 5155 : 157 : LOCK(::cs_main);
# 5156 : 157 : m_ibd_chainstate.reset();
# 5157 : 157 : m_snapshot_chainstate.reset();
# 5158 : 157 : m_active_chainstate = nullptr;
# 5159 : 157 : m_snapshot_validated = false;
# 5160 : 157 : }
# 5161 : :
# 5162 : : void ChainstateManager::MaybeRebalanceCaches()
# 5163 : 9 : {
# 5164 [ + - ][ + + ]: 9 : if (m_ibd_chainstate && !m_snapshot_chainstate) {
# 5165 : 7 : LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
# 5166 : : // Allocate everything to the IBD chainstate.
# 5167 : 7 : m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5168 : 7 : }
# 5169 [ + - ][ - + ]: 2 : else if (m_snapshot_chainstate && !m_ibd_chainstate) {
# 5170 : 0 : LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
# 5171 : : // Allocate everything to the snapshot chainstate.
# 5172 : 0 : m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5173 : 0 : }
# 5174 [ + - ][ + - ]: 2 : else if (m_ibd_chainstate && m_snapshot_chainstate) {
# 5175 : : // If both chainstates exist, determine who needs more cache based on IBD status.
# 5176 : : //
# 5177 : : // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
# 5178 [ + + ]: 2 : if (m_snapshot_chainstate->IsInitialBlockDownload()) {
# 5179 : 1 : m_ibd_chainstate->ResizeCoinsCaches(
# 5180 : 1 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5181 : 1 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5182 : 1 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5183 : 1 : } else {
# 5184 : 1 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5185 : 1 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5186 : 1 : m_ibd_chainstate->ResizeCoinsCaches(
# 5187 : 1 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5188 : 1 : }
# 5189 : 2 : }
# 5190 : 9 : }
|