Branch data Line data Source code
# 1 : : // Copyright (c) 2009-2010 Satoshi Nakamoto
# 2 : : // Copyright (c) 2009-2021 The Bitcoin Core developers
# 3 : : // Distributed under the MIT software license, see the accompanying
# 4 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
# 5 : :
# 6 : : #include <validation.h>
# 7 : :
# 8 : : #include <kernel/coinstats.h>
# 9 : : #include <kernel/mempool_persist.h>
# 10 : :
# 11 : : #include <arith_uint256.h>
# 12 : : #include <chain.h>
# 13 : : #include <chainparams.h>
# 14 : : #include <checkqueue.h>
# 15 : : #include <consensus/amount.h>
# 16 : : #include <consensus/consensus.h>
# 17 : : #include <consensus/merkle.h>
# 18 : : #include <consensus/tx_check.h>
# 19 : : #include <consensus/tx_verify.h>
# 20 : : #include <consensus/validation.h>
# 21 : : #include <cuckoocache.h>
# 22 : : #include <flatfile.h>
# 23 : : #include <fs.h>
# 24 : : #include <hash.h>
# 25 : : #include <logging.h>
# 26 : : #include <logging/timer.h>
# 27 : : #include <node/blockstorage.h>
# 28 : : #include <node/interface_ui.h>
# 29 : : #include <node/utxo_snapshot.h>
# 30 : : #include <policy/policy.h>
# 31 : : #include <policy/rbf.h>
# 32 : : #include <policy/settings.h>
# 33 : : #include <pow.h>
# 34 : : #include <primitives/block.h>
# 35 : : #include <primitives/transaction.h>
# 36 : : #include <random.h>
# 37 : : #include <reverse_iterator.h>
# 38 : : #include <script/script.h>
# 39 : : #include <script/sigcache.h>
# 40 : : #include <shutdown.h>
# 41 : : #include <signet.h>
# 42 : : #include <tinyformat.h>
# 43 : : #include <txdb.h>
# 44 : : #include <txmempool.h>
# 45 : : #include <uint256.h>
# 46 : : #include <undo.h>
# 47 : : #include <util/check.h> // For NDEBUG compile time check
# 48 : : #include <util/hasher.h>
# 49 : : #include <util/moneystr.h>
# 50 : : #include <util/rbf.h>
# 51 : : #include <util/strencodings.h>
# 52 : : #include <util/system.h>
# 53 : : #include <util/time.h>
# 54 : : #include <util/trace.h>
# 55 : : #include <util/translation.h>
# 56 : : #include <validationinterface.h>
# 57 : : #include <warnings.h>
# 58 : :
# 59 : : #include <algorithm>
# 60 : : #include <cassert>
# 61 : : #include <chrono>
# 62 : : #include <deque>
# 63 : : #include <numeric>
# 64 : : #include <optional>
# 65 : : #include <string>
# 66 : :
# 67 : : using kernel::CCoinsStats;
# 68 : : using kernel::CoinStatsHashType;
# 69 : : using kernel::ComputeUTXOStats;
# 70 : : using kernel::LoadMempool;
# 71 : :
# 72 : : using fsbridge::FopenFn;
# 73 : : using node::BlockManager;
# 74 : : using node::BlockMap;
# 75 : : using node::CBlockIndexHeightOnlyComparator;
# 76 : : using node::CBlockIndexWorkComparator;
# 77 : : using node::fImporting;
# 78 : : using node::fPruneMode;
# 79 : : using node::fReindex;
# 80 : : using node::ReadBlockFromDisk;
# 81 : : using node::SnapshotMetadata;
# 82 : : using node::UndoReadFromDisk;
# 83 : : using node::UnlinkPrunedFiles;
# 84 : :
# 85 : : #define MICRO 0.000001
# 86 : : #define MILLI 0.001
# 87 : :
# 88 : : /** Maximum kilobytes for transactions to store for processing during reorg */
# 89 : : static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
# 90 : : /** Time to wait between writing blocks/block index to disk. */
# 91 : : static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
# 92 : : /** Time to wait between flushing chainstate to disk. */
# 93 : : static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
# 94 : : /** Maximum age of our tip for us to be considered current for fee estimation */
# 95 : : static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
# 96 : : const std::vector<std::string> CHECKLEVEL_DOC {
# 97 : : "level 0 reads the blocks from disk",
# 98 : : "level 1 verifies block validity",
# 99 : : "level 2 verifies undo data",
# 100 : : "level 3 checks disconnection of tip blocks",
# 101 : : "level 4 tries to reconnect the blocks",
# 102 : : "each level includes the checks of the previous levels",
# 103 : : };
# 104 : : /** The number of blocks to keep below the deepest prune lock.
# 105 : : * There is nothing special about this number. It is higher than what we
# 106 : : * expect to see in regular mainnet reorgs, but not so high that it would
# 107 : : * noticeably interfere with the pruning mechanism.
# 108 : : * */
# 109 : : static constexpr int PRUNE_LOCK_BUFFER{10};
# 110 : :
# 111 : : /**
# 112 : : * Mutex to guard access to validation specific variables, such as reading
# 113 : : * or changing the chainstate.
# 114 : : *
# 115 : : * This may also need to be locked when updating the transaction pool, e.g. on
# 116 : : * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
# 117 : : *
# 118 : : * The transaction pool has a separate lock to allow reading from it and the
# 119 : : * chainstate at the same time.
# 120 : : */
# 121 : : RecursiveMutex cs_main;
# 122 : :
# 123 : : GlobalMutex g_best_block_mutex;
# 124 : : std::condition_variable g_best_block_cv;
# 125 : : uint256 g_best_block;
# 126 : : bool g_parallel_script_checks{false};
# 127 : : bool fCheckBlockIndex = false;
# 128 : : bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
# 129 : : int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
# 130 : :
# 131 : : uint256 hashAssumeValid;
# 132 : : arith_uint256 nMinimumChainWork;
# 133 : :
# 134 : : const CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const
# 135 : 2260 : {
# 136 : 2260 : AssertLockHeld(cs_main);
# 137 : :
# 138 : : // Find the latest block common to locator and chain - we expect that
# 139 : : // locator.vHave is sorted descending by height.
# 140 [ + + ]: 2947 : for (const uint256& hash : locator.vHave) {
# 141 : 2947 : const CBlockIndex* pindex{m_blockman.LookupBlockIndex(hash)};
# 142 [ + + ]: 2947 : if (pindex) {
# 143 [ + + ]: 2369 : if (m_chain.Contains(pindex)) {
# 144 : 2245 : return pindex;
# 145 : 2245 : }
# 146 [ + + ]: 124 : if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
# 147 : 7 : return m_chain.Tip();
# 148 : 7 : }
# 149 : 124 : }
# 150 : 2947 : }
# 151 : 8 : return m_chain.Genesis();
# 152 : 2260 : }
# 153 : :
# 154 : : bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
# 155 : : const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
# 156 : : bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
# 157 : : std::vector<CScriptCheck>* pvChecks = nullptr)
# 158 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 159 : :
# 160 : : bool CheckFinalTxAtTip(const CBlockIndex& active_chain_tip, const CTransaction& tx)
# 161 : 33170 : {
# 162 : 33170 : AssertLockHeld(cs_main);
# 163 : :
# 164 : : // CheckFinalTxAtTip() uses active_chain_tip.Height()+1 to evaluate
# 165 : : // nLockTime because when IsFinalTx() is called within
# 166 : : // AcceptBlock(), the height of the block *being*
# 167 : : // evaluated is what is used. Thus if we want to know if a
# 168 : : // transaction can be part of the *next* block, we need to call
# 169 : : // IsFinalTx() with one more than active_chain_tip.Height().
# 170 : 33170 : const int nBlockHeight = active_chain_tip.nHeight + 1;
# 171 : :
# 172 : : // BIP113 requires that time-locked transactions have nLockTime set to
# 173 : : // less than the median time of the previous block they're contained in.
# 174 : : // When the next block is created its previous block will be the current
# 175 : : // chain tip, so we use that to calculate the median time passed to
# 176 : : // IsFinalTx().
# 177 : 33170 : const int64_t nBlockTime{active_chain_tip.GetMedianTimePast()};
# 178 : :
# 179 : 33170 : return IsFinalTx(tx, nBlockHeight, nBlockTime);
# 180 : 33170 : }
# 181 : :
/** Check whether a transaction's relative lock-time (sequence lock) constraints
 *  would be satisfied in the next block built on top of `tip`.
 *
 *  @param tip                   Current chain tip; must not be null. Locks are
 *                               evaluated as if the tx were mined at tip height + 1.
 *  @param coins_view            View used to look up the creation heights of the
 *                               tx's inputs (mempool inputs report MEMPOOL_HEIGHT).
 *  @param tx                    Transaction to evaluate.
 *  @param lp                    Optional in/out LockPoints cache; updated with the
 *                               recomputed lock heights/times when recomputing.
 *  @param useExistingLockPoints When true, trust the height/time already stored in
 *                               *lp (lp must be non-null) instead of recomputing.
 *  @return true if the sequence locks are satisfied; false if an input is missing
 *          from coins_view or the locks are not yet met.
 */
bool CheckSequenceLocksAtTip(CBlockIndex* tip,
                             const CCoinsView& coins_view,
                             const CTransaction& tx,
                             LockPoints* lp,
                             bool useExistingLockPoints)
{
    assert(tip != nullptr);

    // Build a phantom index entry representing the next block on top of tip;
    // sequence locks are evaluated against this hypothetical block.
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than active_chainstate.m_chain.Height()
    index.nHeight = tip->nHeight + 1;

    // lockPair = (minimum block height, minimum block time) the next block must
    // reach before this transaction's sequence locks are satisfied.
    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // Recompute from the heights of each input's creating block.
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!coins_view.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocksAtTip to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (const int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            // tip->GetAncestor(maxInputHeight) should never return a nullptr
            // because maxInputHeight is always less than the tip height.
            // It would, however, be a bad bug to continue execution, since a
            // LockPoints object with the maxInputBlock member set to nullptr
            // signifies no relative lock time.
            lp->maxInputBlock = Assert(tip->GetAncestor(maxInputHeight));
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
# 256 : :
# 257 : : // Returns the script flags which should be checked for a given block
# 258 : : static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman);
# 259 : :
# 260 : : static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache)
# 261 : : EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
# 262 : 21991 : {
# 263 : 21991 : AssertLockHeld(::cs_main);
# 264 : 21991 : AssertLockHeld(pool.cs);
# 265 : 21991 : int expired = pool.Expire(GetTime<std::chrono::seconds>() - pool.m_expiry);
# 266 [ + + ]: 21991 : if (expired != 0) {
# 267 [ + - ]: 2 : LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
# 268 : 2 : }
# 269 : :
# 270 : 21991 : std::vector<COutPoint> vNoSpendsRemaining;
# 271 : 21991 : pool.TrimToSize(pool.m_max_size_bytes, &vNoSpendsRemaining);
# 272 [ + + ]: 21991 : for (const COutPoint& removed : vNoSpendsRemaining)
# 273 : 4 : coins_cache.Uncache(removed);
# 274 : 21991 : }
# 275 : :
# 276 : : static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
# 277 : 20952 : {
# 278 : 20952 : AssertLockHeld(cs_main);
# 279 [ + + ]: 20952 : if (active_chainstate.IsInitialBlockDownload())
# 280 : 69 : return false;
# 281 [ + + ]: 20883 : if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
# 282 : 114 : return false;
# 283 [ - + ]: 20769 : if (active_chainstate.m_chain.Height() < active_chainstate.m_chainman.m_best_header->nHeight - 1) {
# 284 : 0 : return false;
# 285 : 0 : }
# 286 : 20769 : return true;
# 287 : 20769 : }
# 288 : :
/** After a chain reorg, reconcile the mempool with the new chain:
 *  - when fAddToMempool is true, try to resubmit the transactions from the
 *    disconnected blocks (earliest-confirmed first) back into the mempool;
 *  - remove, together with their in-mempool descendants, any transaction that
 *    could not be resubmitted;
 *  - evict entries that are no longer valid against the new tip (non-final,
 *    unsatisfied sequence locks, or spending a now-immature coinbase output);
 *  - finally re-apply the mempool's expiry and size limits.
 *  `disconnectpool` is emptied by this call. No-op if there is no mempool.
 */
void CChainState::MaybeUpdateMempoolForReorg(
    DisconnectedBlockTransactions& disconnectpool,
    bool fAddToMempool)
{
    if (!m_mempool) return;

    AssertLockHeld(cs_main);
    AssertLockHeld(m_mempool->cs);
    // Hashes of transactions successfully re-added; their descendant state is
    // fixed up below via UpdateTransactionsFromBlock.
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            AcceptToMemoryPool(*this, *it, GetTime(),
                /*bypass_limits=*/true, /*test_accept=*/false).m_result_type !=
                    MempoolAcceptResult::ResultType::VALID) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    m_mempool->UpdateTransactionsFromBlock(vHashUpdate);

    // Predicate to use for filtering transactions in removeForReorg.
    // Checks whether the transaction is still final and, if it spends a coinbase output, mature.
    // Also updates valid entries' cached LockPoints if needed.
    // If false, the tx is still valid and its lockpoints are updated.
    // If true, the tx would be invalid in the next block; remove this entry and all of its descendants.
    const auto filter_final_and_mature = [this](CTxMemPool::txiter it)
        EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) {
        AssertLockHeld(m_mempool->cs);
        AssertLockHeld(::cs_main);
        const CTransaction& tx = it->GetTx();

        // The transaction must be final.
        if (!CheckFinalTxAtTip(*Assert(m_chain.Tip()), tx)) return true;
        LockPoints lp = it->GetLockPoints();
        const bool validLP{TestLockPointValidity(m_chain, lp)};
        CCoinsViewMemPool view_mempool(&CoinsTip(), *m_mempool);
        // CheckSequenceLocksAtTip checks if the transaction will be final in the next block to be
        // created on top of the new chain. We use useExistingLockPoints=false so that, instead of
        // using the information in lp (which might now refer to a block that no longer exists in
        // the chain), it will update lp to contain LockPoints relevant to the new chain.
        if (!CheckSequenceLocksAtTip(m_chain.Tip(), view_mempool, tx, &lp, validLP)) {
            // If CheckSequenceLocksAtTip fails, remove the tx and don't depend on the LockPoints.
            return true;
        } else if (!validLP) {
            // If CheckSequenceLocksAtTip succeeded, it also updated the LockPoints.
            // Now update the mempool entry lockpoints as well.
            m_mempool->mapTx.modify(it, [&lp](CTxMemPoolEntry& e) { e.UpdateLockPoints(lp); });
        }

        // If the transaction spends any coinbase outputs, it must be mature.
        if (it->GetSpendsCoinbase()) {
            for (const CTxIn& txin : tx.vin) {
                // Inputs still provided by the mempool cannot be coinbases.
                auto it2 = m_mempool->mapTx.find(txin.prevout.hash);
                if (it2 != m_mempool->mapTx.end())
                    continue;
                const Coin& coin{CoinsTip().AccessCoin(txin.prevout)};
                assert(!coin.IsSpent());
                const auto mempool_spend_height{m_chain.Tip()->nHeight + 1};
                if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) {
                    return true;
                }
            }
        }
        // Transaction is still valid and cached LockPoints are updated.
        return false;
    };

    // We also need to remove any now-immature transactions
    m_mempool->removeForReorg(m_chain, filter_final_and_mature);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(*m_mempool, this->CoinsTip());
}
# 379 : :
# 380 : : /**
# 381 : : * Checks to avoid mempool polluting consensus critical paths since cached
# 382 : : * signature and script validity results will be reused if we validate this
# 383 : : * transaction again during block validation.
# 384 : : * */
# 385 : : static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state,
# 386 : : const CCoinsViewCache& view, const CTxMemPool& pool,
# 387 : : unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
# 388 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
# 389 : 24890 : {
# 390 : 24890 : AssertLockHeld(cs_main);
# 391 : 24890 : AssertLockHeld(pool.cs);
# 392 : :
# 393 : 24890 : assert(!tx.IsCoinBase());
# 394 [ + + ]: 51155 : for (const CTxIn& txin : tx.vin) {
# 395 : 51155 : const Coin& coin = view.AccessCoin(txin.prevout);
# 396 : :
# 397 : : // This coin was checked in PreChecks and MemPoolAccept
# 398 : : // has been holding cs_main since then.
# 399 : 51155 : Assume(!coin.IsSpent());
# 400 [ - + ]: 51155 : if (coin.IsSpent()) return false;
# 401 : :
# 402 : : // If the Coin is available, there are 2 possibilities:
# 403 : : // it is available in our current ChainstateActive UTXO set,
# 404 : : // or it's a UTXO provided by a transaction in our mempool.
# 405 : : // Ensure the scriptPubKeys in Coins from CoinsView are correct.
# 406 : 51155 : const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
# 407 [ + + ]: 51155 : if (txFrom) {
# 408 : 7073 : assert(txFrom->GetHash() == txin.prevout.hash);
# 409 : 0 : assert(txFrom->vout.size() > txin.prevout.n);
# 410 : 0 : assert(txFrom->vout[txin.prevout.n] == coin.out);
# 411 : 44082 : } else {
# 412 : 44082 : const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
# 413 : 44082 : assert(!coinFromUTXOSet.IsSpent());
# 414 : 0 : assert(coinFromUTXOSet.out == coin.out);
# 415 : 44082 : }
# 416 : 51155 : }
# 417 : :
# 418 : : // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
# 419 : 24890 : return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata);
# 420 : 24890 : }
# 421 : :
# 422 : : namespace {
# 423 : :
# 424 : : class MemPoolAccept
# 425 : : {
# 426 : : public:
    // Snapshot the mempool's ancestor/descendant package limits at
    // construction so all checks in this acceptance run use consistent values.
    // m_view starts out backed by the empty m_dummy view.
    explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
        m_limit_ancestors(m_pool.m_limits.ancestor_count),
        m_limit_ancestor_size(m_pool.m_limits.ancestor_size_vbytes),
        m_limit_descendants(m_pool.m_limits.descendant_count),
        m_limit_descendant_size(m_pool.m_limits.descendant_size_vbytes) {
    }
# 433 : :
    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        /** Parameters of the chain this node is validating for. */
        const CChainParams& m_chainparams;
        /** Caller-supplied time to record for this acceptance attempt. */
        const int64_t m_accept_time;
        /** When true, mempool limits are bypassed for this acceptance — used
         * e.g. when re-adding transactions during a reorg. */
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        /** When true, only validate; the transaction is not actually submitted
         * to the mempool (testmempoolaccept behavior). */
        const bool m_test_accept;
        /** Whether we allow transactions to replace mempool transactions by BIP125 rules. If false,
         * any transaction spending the same inputs as a transaction in the mempool is considered
         * a conflict. */
        const bool m_allow_replacement;
        /** When true, the mempool will not be trimmed when individual transactions are submitted in
         * Finalize(). Instead, limits should be enforced at the end to ensure the package is not
         * partially submitted.
         */
        const bool m_package_submission;
        /** When true, use package feerates instead of individual transaction feerates for fee-based
         * policies such as mempool min fee and min relay fee.
         */
        const bool m_package_feerates;

        /** Parameters for single transaction mempool validation. */
        static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time,
                                     bool bypass_limits, std::vector<COutPoint>& coins_to_uncache,
                                     bool test_accept) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ bypass_limits,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ test_accept,
                            /* m_allow_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for test package mempool validation through testmempoolaccept. */
        static ATMPArgs PackageTestAccept(const CChainParams& chainparams, int64_t accept_time,
                                          std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ true,
                            /* m_allow_replacement */ false,
                            /* m_package_submission */ false, // not submitting to mempool
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for child-with-unconfirmed-parents package validation. */
        static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time,
                                                std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ false,
                            /* m_allow_replacement */ false,
                            /* m_package_submission */ true,
                            /* m_package_feerates */ true,
            };
        }

        /** Parameters for a single transaction within a package. */
        static ATMPArgs SingleInPackageAccept(const ATMPArgs& package_args) {
            return ATMPArgs{/* m_chainparams */ package_args.m_chainparams,
                            /* m_accept_time */ package_args.m_accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ package_args.m_coins_to_uncache,
                            /* m_test_accept */ package_args.m_test_accept,
                            /* m_allow_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false, // only 1 transaction
            };
        }

    private:
        // Private ctor to avoid exposing details to clients and allowing the possibility of
        // mixing up the order of the arguments. Use static functions above instead.
        ATMPArgs(const CChainParams& chainparams,
                 int64_t accept_time,
                 bool bypass_limits,
                 std::vector<COutPoint>& coins_to_uncache,
                 bool test_accept,
                 bool allow_replacement,
                 bool package_submission,
                 bool package_feerates)
            : m_chainparams{chainparams},
              m_accept_time{accept_time},
              m_bypass_limits{bypass_limits},
              m_coins_to_uncache{coins_to_uncache},
              m_test_accept{test_accept},
              m_allow_replacement{allow_replacement},
              m_package_submission{package_submission},
              m_package_feerates{package_feerates}
        {
        }
    };
# 541 : :
# 542 : : // Single transaction acceptance
# 543 : : MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 544 : :
# 545 : : /**
# 546 : : * Multiple transaction acceptance. Transactions may or may not be interdependent, but must not
# 547 : : * conflict with each other, and the transactions cannot already be in the mempool. Parents must
# 548 : : * come before children if any dependencies exist.
# 549 : : */
# 550 : : PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 551 : :
# 552 : : /**
# 553 : : * Package (more specific than just multiple transactions) acceptance. Package must be a child
# 554 : : * with all of its unconfirmed parents, and topologically sorted.
# 555 : : */
# 556 : : PackageMempoolAcceptResult AcceptPackage(const Package& package, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 557 : :
# 558 : : private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        /** Txids of mempool transactions that this transaction directly conflicts with. */
        std::set<uint256> m_conflicts;
        /** Iterators to mempool entries that this transaction directly conflicts with. */
        CTxMemPool::setEntries m_iters_conflicting;
        /** Iterators to all mempool entries that would be replaced by this transaction, including
         * those it directly conflicts with and their descendants. */
        CTxMemPool::setEntries m_all_conflicting;
        /** All mempool ancestors of this transaction. */
        CTxMemPool::setEntries m_ancestors;
        /** Mempool entry constructed for this transaction. Constructed in PreChecks() but not
         * inserted into the mempool until Finalize(). */
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        /** Pointers to the transactions that have been removed from the mempool and replaced by
         * this transaction, used to return to the MemPoolAccept caller. Only populated if
         * validation is successful and the original transactions are removed. */
        std::list<CTransactionRef> m_replaced_transactions;

        /** Virtual size of the transaction as used by the mempool, calculated using serialized size
         * of the transaction and sigops. */
        int64_t m_vsize;
        /** Fees paid by this transaction: total input amounts subtracted by total output amounts. */
        CAmount m_base_fees;
        /** Base fees + any fee delta set by the user with prioritisetransaction. */
        CAmount m_modified_fees;
        /** Total modified fees of all transactions being replaced. */
        CAmount m_conflicting_fees{0};
        /** Total virtual size of all transactions being replaced. */
        size_t m_conflicting_size{0};

        /** Reference to the transaction under validation (lifetime owned by the caller). */
        const CTransactionRef& m_ptx;
        /** Txid. */
        const uint256& m_hash;
        /** Validation state for this transaction; records the failure reason if a check fails. */
        TxValidationState m_state;
        /** A temporary cache containing serialized transaction data for signature verification.
         * Reused across PolicyScriptChecks and ConsensusScriptChecks. */
        PrecomputedTransactionData m_precomputed_txdata;
    };
# 600 : :
# 601 : : // Run the policy checks on a given transaction, excluding any script checks.
# 602 : : // Looks up inputs, calculates feerate, considers replacement, evaluates
# 603 : : // package limits, etc. As this function can be invoked for "free" by a peer,
# 604 : : // only tests that are fast should be done here (to avoid CPU DoS).
# 605 : : bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 606 : :
# 607 : : // Run checks for mempool replace-by-fee.
# 608 : : bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 609 : :
# 610 : : // Enforce package mempool ancestor/descendant limits (distinct from individual
# 611 : : // ancestor/descendant limits done in PreChecks).
# 612 : : bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
# 613 : : PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 614 : :
# 615 : : // Run the script checks using our policy flags. As this can be slow, we should
# 616 : : // only invoke this on transactions that have otherwise passed policy checks.
# 617 : : bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 618 : :
# 619 : : // Re-run the script checks, using consensus flags, and try to cache the
# 620 : : // result in the scriptcache. This should be done after
# 621 : : // PolicyScriptChecks(). This requires that all inputs either be in our
# 622 : : // utxo set or in the mempool.
# 623 : : bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 624 : :
# 625 : : // Try to add the transaction to the mempool, removing any conflicts first.
# 626 : : // Returns true if the transaction is in the mempool after any size
# 627 : : // limiting is performed, false otherwise.
# 628 : : bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 629 : :
# 630 : : // Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script
# 631 : : // cache - should only be called after successful validation of all transactions in the package.
# 632 : : // The package may end up partially-submitted after size limiting; returns true if all
# 633 : : // transactions are successfully added to the mempool, false otherwise.
# 634 : : bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
# 635 : : std::map<const uint256, const MempoolAcceptResult>& results)
# 636 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 637 : :
# 638 : : // Compare a package's feerate against minimum allowed.
# 639 : : bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs)
# 640 : 27550 : {
# 641 : 27550 : AssertLockHeld(::cs_main);
# 642 : 27550 : AssertLockHeld(m_pool.cs);
# 643 : 27550 : CAmount mempoolRejectFee = m_pool.GetMinFee().GetFee(package_size);
# 644 [ + + ][ + + ]: 27550 : if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
# 645 : 1 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
# 646 : 1 : }
# 647 : :
# 648 [ + + ]: 27549 : if (package_fee < m_pool.m_min_relay_feerate.GetFee(package_size)) {
# 649 : 46 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
# 650 : 46 : strprintf("%d < %d", package_fee, m_pool.m_min_relay_feerate.GetFee(package_size)));
# 651 : 46 : }
# 652 : 27503 : return true;
# 653 : 27549 : }
# 654 : :
# 655 : : private:
# 656 : : CTxMemPool& m_pool;
# 657 : : CCoinsViewCache m_view;
# 658 : : CCoinsViewMemPool m_viewmempool;
# 659 : : CCoinsView m_dummy;
# 660 : :
# 661 : : CChainState& m_active_chainstate;
# 662 : :
# 663 : : // The package limits in effect at the time of invocation.
# 664 : : const size_t m_limit_ancestors;
# 665 : : const size_t m_limit_ancestor_size;
# 666 : : // These may be modified while evaluating a transaction (eg to account for
# 667 : : // in-mempool conflicts; see below).
# 668 : : size_t m_limit_descendants;
# 669 : : size_t m_limit_descendant_size;
# 670 : :
# 671 : : /** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. */
# 672 : : bool m_rbf{false};
# 673 : : };
# 674 : :
# 675 : : bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
# 676 : 33925 : {
# 677 : 33925 : AssertLockHeld(cs_main);
# 678 : 33925 : AssertLockHeld(m_pool.cs);
# 679 : 33925 : const CTransactionRef& ptx = ws.m_ptx;
# 680 : 33925 : const CTransaction& tx = *ws.m_ptx;
# 681 : 33925 : const uint256& hash = ws.m_hash;
# 682 : :
# 683 : : // Copy/alias what we need out of args
# 684 : 33925 : const int64_t nAcceptTime = args.m_accept_time;
# 685 : 33925 : const bool bypass_limits = args.m_bypass_limits;
# 686 : 33925 : std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
# 687 : :
# 688 : : // Alias what we need out of ws
# 689 : 33925 : TxValidationState& state = ws.m_state;
# 690 : 33925 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
# 691 : :
# 692 [ + + ]: 33925 : if (!CheckTransaction(tx, state)) {
# 693 : 14 : return false; // state filled in by CheckTransaction
# 694 : 14 : }
# 695 : :
# 696 : : // Coinbase is only valid in a block, not as a loose transaction
# 697 [ + + ]: 33911 : if (tx.IsCoinBase())
# 698 : 3 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
# 699 : :
# 700 : : // Rather not work on nonstandard transactions (unless -testnet/-regtest)
# 701 : 33908 : std::string reason;
# 702 [ + + ][ + + ]: 33908 : if (m_pool.m_require_standard && !IsStandardTx(tx, m_pool.m_max_datacarrier_bytes, m_pool.m_permit_bare_multisig, m_pool.m_dust_relay_feerate, reason)) {
# 703 : 1554 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
# 704 : 1554 : }
# 705 : :
# 706 : : // Do not work on transactions that are too small.
# 707 : : // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
# 708 : : // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
# 709 : : // 64-byte transactions.
# 710 [ + + ]: 32354 : if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
# 711 : 6 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
# 712 : :
# 713 : : // Only accept nLockTime-using transactions that can be mined in the next
# 714 : : // block; we don't want our mempool filled up with transactions that can't
# 715 : : // be mined yet.
# 716 [ + + ]: 32348 : if (!CheckFinalTxAtTip(*Assert(m_active_chainstate.m_chain.Tip()), tx)) {
# 717 : 68 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
# 718 : 68 : }
# 719 : :
# 720 [ + + ]: 32280 : if (m_pool.exists(GenTxid::Wtxid(tx.GetWitnessHash()))) {
# 721 : : // Exact transaction already exists in the mempool.
# 722 : 8 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
# 723 [ + + ]: 32272 : } else if (m_pool.exists(GenTxid::Txid(tx.GetHash()))) {
# 724 : : // Transaction with the same non-witness data but different witness (same txid, different
# 725 : : // wtxid) already exists in the mempool.
# 726 : 1 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
# 727 : 1 : }
# 728 : :
# 729 : : // Check for conflicts with in-memory transactions
# 730 [ + + ]: 32271 : for (const CTxIn &txin : tx.vin)
# 731 : 67399 : {
# 732 : 67399 : const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
# 733 [ + + ]: 67399 : if (ptxConflicting) {
# 734 [ + + ]: 1492 : if (!args.m_allow_replacement) {
# 735 : : // Transaction conflicts with a mempool tx, but we're not allowing replacements.
# 736 : 1 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
# 737 : 1 : }
# 738 [ + + ]: 1491 : if (!ws.m_conflicts.count(ptxConflicting->GetHash()))
# 739 : 1479 : {
# 740 : : // Transactions that don't explicitly signal replaceability are
# 741 : : // *not* replaceable with the current logic, even if one of their
# 742 : : // unconfirmed ancestors signals replaceability. This diverges
# 743 : : // from BIP125's inherited signaling description (see CVE-2021-31876).
# 744 : : // Applications relying on first-seen mempool behavior should
# 745 : : // check all unconfirmed ancestors; otherwise an opt-in ancestor
# 746 : : // might be replaced, causing removal of this descendant.
# 747 : : //
# 748 : : // If replaceability signaling is ignored due to node setting,
# 749 : : // replacement is always allowed.
# 750 [ + + ][ + + ]: 1479 : if (!m_pool.m_full_rbf && !SignalsOptInRBF(*ptxConflicting)) {
# 751 : 8 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
# 752 : 8 : }
# 753 : :
# 754 : 1471 : ws.m_conflicts.insert(ptxConflicting->GetHash());
# 755 : 1471 : }
# 756 : 1491 : }
# 757 : 67399 : }
# 758 : :
# 759 : 32262 : LockPoints lp;
# 760 : 32262 : m_view.SetBackend(m_viewmempool);
# 761 : :
# 762 : 32262 : const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
# 763 : : // do all inputs exist?
# 764 [ + + ]: 64047 : for (const CTxIn& txin : tx.vin) {
# 765 [ + + ]: 64047 : if (!coins_cache.HaveCoinInCache(txin.prevout)) {
# 766 : 15692 : coins_to_uncache.push_back(txin.prevout);
# 767 : 15692 : }
# 768 : :
# 769 : : // Note: this call may add txin.prevout to the coins cache
# 770 : : // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
# 771 : : // later (via coins_to_uncache) if this tx turns out to be invalid.
# 772 [ + + ]: 64047 : if (!m_view.HaveCoin(txin.prevout)) {
# 773 : : // Are inputs missing because we already have the tx?
# 774 [ + + ]: 7341 : for (size_t out = 0; out < tx.vout.size(); out++) {
# 775 : : // Optimistically just do efficient check of cache for outputs
# 776 [ + + ]: 3711 : if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
# 777 : 4 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
# 778 : 4 : }
# 779 : 3711 : }
# 780 : : // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
# 781 : 3630 : return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
# 782 : 3634 : }
# 783 : 64047 : }
# 784 : :
# 785 : : // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
# 786 : : // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
# 787 : 28628 : m_view.GetBestBlock();
# 788 : :
# 789 : : // we have all inputs cached now, so switch back to dummy (to protect
# 790 : : // against bugs where we pull more inputs from disk that miss being added
# 791 : : // to coins_to_uncache)
# 792 : 28628 : m_view.SetBackend(m_dummy);
# 793 : :
# 794 : 28628 : assert(m_active_chainstate.m_blockman.LookupBlockIndex(m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
# 795 : :
# 796 : : // Only accept BIP68 sequence locked transactions that can be mined in the next
# 797 : : // block; we don't want our mempool filled up with transactions that can't
# 798 : : // be mined yet.
# 799 : : // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
# 800 : : // backend was removed, it no longer pulls coins from the mempool.
# 801 [ + + ]: 28628 : if (!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx, &lp)) {
# 802 : 369 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
# 803 : 369 : }
# 804 : :
# 805 : : // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs
# 806 [ + + ]: 28259 : if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_chain.Height() + 1, ws.m_base_fees)) {
# 807 : 3 : return false; // state filled in by CheckTxInputs
# 808 : 3 : }
# 809 : :
# 810 [ + + ][ + + ]: 28256 : if (m_pool.m_require_standard && !AreInputsStandard(tx, m_view)) {
# 811 : 206 : return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
# 812 : 206 : }
# 813 : :
# 814 : : // Check for non-standard witnesses.
# 815 [ + + ][ + + ]: 28050 : if (tx.HasWitness() && m_pool.m_require_standard && !IsWitnessStandard(tx, m_view)) {
# [ + + ]
# 816 : 155 : return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
# 817 : 155 : }
# 818 : :
# 819 : 27895 : int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
# 820 : :
# 821 : : // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
# 822 : 27895 : ws.m_modified_fees = ws.m_base_fees;
# 823 : 27895 : m_pool.ApplyDelta(hash, ws.m_modified_fees);
# 824 : :
# 825 : : // Keep track of transactions that spend a coinbase, which we re-scan
# 826 : : // during reorgs to ensure COINBASE_MATURITY is still met.
# 827 : 27895 : bool fSpendsCoinbase = false;
# 828 [ + + ]: 53127 : for (const CTxIn &txin : tx.vin) {
# 829 : 53127 : const Coin &coin = m_view.AccessCoin(txin.prevout);
# 830 [ + + ]: 53127 : if (coin.IsCoinBase()) {
# 831 : 3536 : fSpendsCoinbase = true;
# 832 : 3536 : break;
# 833 : 3536 : }
# 834 : 53127 : }
# 835 : :
# 836 : 27895 : entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
# 837 : 27895 : fSpendsCoinbase, nSigOpsCost, lp));
# 838 : 27895 : ws.m_vsize = entry->GetTxSize();
# 839 : :
# 840 [ + + ]: 27895 : if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
# 841 : 4 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
# 842 : 4 : strprintf("%d", nSigOpsCost));
# 843 : :
# 844 : : // No individual transactions are allowed below the min relay feerate and mempool min feerate except from
# 845 : : // disconnected blocks and transactions in a package. Package transactions will be checked using
# 846 : : // package feerate later.
# 847 [ + + ][ + + ]: 27891 : if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false;
# [ + + ]
# 848 : :
# 849 : 27850 : ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
# 850 : : // Calculate in-mempool ancestors, up to a limit.
# 851 [ + + ]: 27850 : if (ws.m_conflicts.size() == 1) {
# 852 : : // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
# 853 : : // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
# 854 : : // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
# 855 : : // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
# 856 : : // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
# 857 : : // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
# 858 : : // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
# 859 : : // for off-chain contract systems (see link in the comment below).
# 860 : : //
# 861 : : // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
# 862 : : // conflict directly with exactly one other transaction (but may evict children of said transaction),
# 863 : : // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
# 864 : : // check is accomplished later, so we don't bother doing anything about it here, but if our
# 865 : : // policy changes, we may need to move that check to here instead of removing it wholesale.
# 866 : : //
# 867 : : // Such transactions are clearly not merging any existing packages, so we are only concerned with
# 868 : : // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
# 869 : : // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
# 870 : : // to.
# 871 : : //
# 872 : : // To check these we first check if we meet the RBF criteria, above, and increment the descendant
# 873 : : // limits by the direct conflict and its descendants (as these are recalculated in
# 874 : : // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
# 875 : : // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
# 876 : : // the ancestor limits should be the same for both our new transaction and any conflicts).
# 877 : : // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
# 878 : : // into force here (as we're only adding a single transaction).
# 879 : 1029 : assert(ws.m_iters_conflicting.size() == 1);
# 880 : 0 : CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
# 881 : :
# 882 : 1029 : m_limit_descendants += 1;
# 883 : 1029 : m_limit_descendant_size += conflict->GetSizeWithDescendants();
# 884 : 1029 : }
# 885 : :
# 886 : 0 : std::string errString;
# 887 [ + + ]: 27850 : if (!m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
# 888 : 99 : ws.m_ancestors.clear();
# 889 : : // If CalculateMemPoolAncestors fails second time, we want the original error string.
# 890 : 99 : std::string dummy_err_string;
# 891 : : // Contracting/payment channels CPFP carve-out:
# 892 : : // If the new transaction is relatively small (up to 40k weight)
# 893 : : // and has at most one ancestor (ie ancestor limit of 2, including
# 894 : : // the new transaction), allow it if its parent has exactly the
# 895 : : // descendant limit descendants.
# 896 : : //
# 897 : : // This allows protocols which rely on distrusting counterparties
# 898 : : // being able to broadcast descendants of an unconfirmed transaction
# 899 : : // to be secure by simply only having two immediately-spendable
# 900 : : // outputs - one for each counterparty. For more info on the uses for
# 901 : : // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
# 902 [ + + ]: 99 : if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
# 903 [ + + ]: 99 : !m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
# 904 : 93 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
# 905 : 93 : }
# 906 : 99 : }
# 907 : :
# 908 : : // A transaction that spends outputs that would be replaced by it is invalid. Now
# 909 : : // that we have the set of all ancestors we can detect this
# 910 : : // pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
# 911 : : // intersect.
# 912 [ + + ]: 27757 : if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) {
# 913 : : // We classify this as a consensus error because a transaction depending on something it
# 914 : : // conflicts with would be inconsistent.
# 915 : 4 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
# 916 : 4 : }
# 917 : :
# 918 : 27753 : m_rbf = !ws.m_conflicts.empty();
# 919 : 27753 : return true;
# 920 : 27757 : }
# 921 : :
# 922 : : bool MemPoolAccept::ReplacementChecks(Workspace& ws)
# 923 : 1033 : {
# 924 : 1033 : AssertLockHeld(cs_main);
# 925 : 1033 : AssertLockHeld(m_pool.cs);
# 926 : :
# 927 : 1033 : const CTransaction& tx = *ws.m_ptx;
# 928 : 1033 : const uint256& hash = ws.m_hash;
# 929 : 1033 : TxValidationState& state = ws.m_state;
# 930 : :
# 931 : 1033 : CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
# 932 : : // Enforce Rule #6. The replacement transaction must have a higher feerate than its direct conflicts.
# 933 : : // - The motivation for this check is to ensure that the replacement transaction is preferable for
# 934 : : // block-inclusion, compared to what would be removed from the mempool.
# 935 : : // - This logic predates ancestor feerate-based transaction selection, which is why it doesn't
# 936 : : // consider feerates of descendants.
# 937 : : // - Note: Ancestor feerate-based transaction selection has made this comparison insufficient to
# 938 : : // guarantee that this is incentive-compatible for miners, because it is possible for a
# 939 : : // descendant transaction of a direct conflict to pay a higher feerate than the transaction that
# 940 : : // might replace them, under these rules.
# 941 [ + + ]: 1033 : if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
# 942 : 12 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
# 943 : 12 : }
# 944 : :
# 945 : : // Calculate all conflicting entries and enforce Rule #5.
# 946 [ + + ]: 1021 : if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
# 947 : 8 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
# 948 : 8 : "too many potential replacements", *err_string);
# 949 : 8 : }
# 950 : : // Enforce Rule #2.
# 951 [ + + ]: 1013 : if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
# 952 : 2 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
# 953 : 2 : "replacement-adds-unconfirmed", *err_string);
# 954 : 2 : }
# 955 : : // Check if it's economically rational to mine this transaction rather than the ones it
# 956 : : // replaces and pays for its own relay fees. Enforce Rules #3 and #4.
# 957 [ + + ]: 1969 : for (CTxMemPool::txiter it : ws.m_all_conflicting) {
# 958 : 1969 : ws.m_conflicting_fees += it->GetModifiedFee();
# 959 : 1969 : ws.m_conflicting_size += it->GetTxSize();
# 960 : 1969 : }
# 961 [ + + ]: 1011 : if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
# 962 : 1011 : m_pool.m_incremental_relay_feerate, hash)}) {
# 963 : 6 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
# 964 : 6 : }
# 965 : 1005 : return true;
# 966 : 1011 : }
# 967 : :
# 968 : : bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
# 969 : : PackageValidationState& package_state)
# 970 : 69 : {
# 971 : 69 : AssertLockHeld(cs_main);
# 972 : 69 : AssertLockHeld(m_pool.cs);
# 973 : :
# 974 : : // CheckPackageLimits expects the package transactions to not already be in the mempool.
# 975 : 69 : assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto& tx)
# 976 : 69 : { return !m_pool.exists(GenTxid::Txid(tx->GetHash()));}));
# 977 : :
# 978 : 0 : std::string err_string;
# 979 [ + + ]: 69 : if (!m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants,
# 980 : 69 : m_limit_descendant_size, err_string)) {
# 981 : : // This is a package-wide error, separate from an individual transaction error.
# 982 : 10 : return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
# 983 : 10 : }
# 984 : 59 : return true;
# 985 : 69 : }
# 986 : :
# 987 : : bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
# 988 : 27654 : {
# 989 : 27654 : AssertLockHeld(cs_main);
# 990 : 27654 : AssertLockHeld(m_pool.cs);
# 991 : 27654 : const CTransaction& tx = *ws.m_ptx;
# 992 : 27654 : TxValidationState& state = ws.m_state;
# 993 : :
# 994 : 27654 : constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
# 995 : :
# 996 : : // Check input scripts and signatures.
# 997 : : // This is done last to help prevent CPU exhaustion denial-of-service attacks.
# 998 [ + + ]: 27654 : if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata)) {
# 999 : : // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
# 1000 : : // need to turn both off, and compare against just turning off CLEANSTACK
# 1001 : : // to see if the failure is specifically due to witness validation.
# 1002 : 2238 : TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
# 1003 [ + + ][ + + ]: 2238 : if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata) &&
# 1004 [ + - ]: 2238 : !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata)) {
# 1005 : : // Only the witness is missing, so the transaction itself may be fine.
# 1006 : 17 : state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
# 1007 : 17 : state.GetRejectReason(), state.GetDebugMessage());
# 1008 : 17 : }
# 1009 : 2238 : return false; // state filled in by CheckInputScripts
# 1010 : 2238 : }
# 1011 : :
# 1012 : 25416 : return true;
# 1013 : 27654 : }
# 1014 : :
# 1015 : : bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws)
# 1016 : 24890 : {
# 1017 : 24890 : AssertLockHeld(cs_main);
# 1018 : 24890 : AssertLockHeld(m_pool.cs);
# 1019 : 24890 : const CTransaction& tx = *ws.m_ptx;
# 1020 : 24890 : const uint256& hash = ws.m_hash;
# 1021 : 24890 : TxValidationState& state = ws.m_state;
# 1022 : :
# 1023 : : // Check again against the current block tip's script verification
# 1024 : : // flags to cache our script execution flags. This is, of course,
# 1025 : : // useless if the next block has different script flags from the
# 1026 : : // previous one, but because the cache tracks script flags for us it
# 1027 : : // will auto-invalidate and we'll just have a few blocks of extra
# 1028 : : // misses on soft-fork activation.
# 1029 : : //
# 1030 : : // This is also useful in case of bugs in the standard flags that cause
# 1031 : : // transactions to pass as valid when they're actually invalid. For
# 1032 : : // instance the STRICTENC flag was incorrectly allowing certain
# 1033 : : // CHECKSIG NOT scripts to pass, even though they were invalid.
# 1034 : : //
# 1035 : : // There is a similar check in CreateNewBlock() to prevent creating
# 1036 : : // invalid blocks (using TestBlockValidity), however allowing such
# 1037 : : // transactions into the mempool can be exploited as a DoS attack.
# 1038 : 24890 : unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)};
# 1039 [ - + ]: 24890 : if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags,
# 1040 : 24890 : ws.m_precomputed_txdata, m_active_chainstate.CoinsTip())) {
# 1041 : 0 : LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString());
# 1042 : 0 : return Assume(false);
# 1043 : 0 : }
# 1044 : :
# 1045 : 24890 : return true;
# 1046 : 24890 : }
# 1047 : :
# 1048 : : bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
# 1049 : 21258 : {
# 1050 : 21258 : AssertLockHeld(cs_main);
# 1051 : 21258 : AssertLockHeld(m_pool.cs);
# 1052 : 21258 : const CTransaction& tx = *ws.m_ptx;
# 1053 : 21258 : const uint256& hash = ws.m_hash;
# 1054 : 21258 : TxValidationState& state = ws.m_state;
# 1055 : 21258 : const bool bypass_limits = args.m_bypass_limits;
# 1056 : :
# 1057 : 21258 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
# 1058 : :
# 1059 : : // Remove conflicting transactions from the mempool
# 1060 [ + + ]: 21258 : for (CTxMemPool::txiter it : ws.m_all_conflicting)
# 1061 : 1506 : {
# 1062 [ + - ]: 1506 : LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
# 1063 : 1506 : it->GetTx().GetHash().ToString(),
# 1064 : 1506 : hash.ToString(),
# 1065 : 1506 : FormatMoney(ws.m_modified_fees - ws.m_conflicting_fees),
# 1066 : 1506 : (int)entry->GetTxSize() - (int)ws.m_conflicting_size);
# 1067 : 1506 : ws.m_replaced_transactions.push_back(it->GetSharedTx());
# 1068 : 1506 : }
# 1069 : 21258 : m_pool.RemoveStaged(ws.m_all_conflicting, false, MemPoolRemovalReason::REPLACED);
# 1070 : :
# 1071 : : // This transaction should only count for fee estimation if:
# 1072 : : // - it's not being re-added during a reorg which bypasses typical mempool fee limits
# 1073 : : // - the node is not behind
# 1074 : : // - the transaction is not dependent on any other transactions in the mempool
# 1075 : : // - it's not part of a package. Since package relay is not currently supported, this
# 1076 : : // transaction has not necessarily been accepted to miners' mempools.
# 1077 [ + + ][ + + ]: 21258 : bool validForFeeEstimation = !bypass_limits && !args.m_package_submission && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
# [ + + ][ + + ]
# 1078 : :
# 1079 : : // Store transaction in memory
# 1080 : 21258 : m_pool.addUnchecked(*entry, ws.m_ancestors, validForFeeEstimation);
# 1081 : :
# 1082 : : // trim mempool and check if tx was trimmed
# 1083 : : // If we are validating a package, don't trim here because we could evict a previous transaction
# 1084 : : // in the package. LimitMempoolSize() should be called at the very end to make sure the mempool
# 1085 : : // is still within limits and package submission happens atomically.
# 1086 [ + + ][ + + ]: 21258 : if (!args.m_package_submission && !bypass_limits) {
# 1087 : 20952 : LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
# 1088 [ - + ]: 20952 : if (!m_pool.exists(GenTxid::Txid(hash)))
# 1089 : 0 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
# 1090 : 20952 : }
# 1091 : 21258 : return true;
# 1092 : 21258 : }
# 1093 : :
# 1094 : : bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
# 1095 : : PackageValidationState& package_state,
# 1096 : : std::map<const uint256, const MempoolAcceptResult>& results)
# 1097 : 7 : {
# 1098 : 7 : AssertLockHeld(cs_main);
# 1099 : 7 : AssertLockHeld(m_pool.cs);
# 1100 : : // Sanity check: none of the transactions should be in the mempool, and none of the transactions
# 1101 : : // should have a same-txid-different-witness equivalent in the mempool.
# 1102 : 7 : assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){
# 1103 : 7 : return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); }));
# 1104 : :
# 1105 : 0 : bool all_submitted = true;
# 1106 : : // ConsensusScriptChecks adds to the script cache and is therefore consensus-critical;
# 1107 : : // CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the
# 1108 : : // mempool or UTXO set. Submit each transaction to the mempool immediately after calling
# 1109 : : // ConsensusScriptChecks to make the outputs available for subsequent transactions.
# 1110 [ + + ]: 14 : for (Workspace& ws : workspaces) {
# 1111 [ - + ]: 14 : if (!ConsensusScriptChecks(args, ws)) {
# 1112 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1113 : : // Since PolicyScriptChecks() passed, this should never fail.
# 1114 : 0 : Assume(false);
# 1115 : 0 : all_submitted = false;
# 1116 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1117 : 0 : strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
# 1118 : 0 : ws.m_ptx->GetHash().ToString()));
# 1119 : 0 : }
# 1120 : :
# 1121 : : // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
# 1122 : : // last calculation done in PreChecks, since package ancestors have already been submitted.
# 1123 : 14 : std::string unused_err_string;
# 1124 [ - + ]: 14 : if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors,
# 1125 : 14 : m_limit_ancestor_size, m_limit_descendants,
# 1126 : 14 : m_limit_descendant_size, unused_err_string)) {
# 1127 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1128 : : // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
# 1129 : 0 : Assume(false);
# 1130 : 0 : all_submitted = false;
# 1131 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1132 : 0 : strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
# 1133 : 0 : ws.m_ptx->GetHash().ToString()));
# 1134 : 0 : }
# 1135 : : // If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
# 1136 : : // the transaction's descendant feerate into account because it hasn't seen them yet. Also,
# 1137 : : // we risk evicting a transaction that a subsequent package transaction depends on. Instead,
# 1138 : : // allow the mempool to temporarily bypass limits, the maximum package size) while
# 1139 : : // submitting transactions individually and then trim at the very end.
# 1140 [ - + ]: 14 : if (!Finalize(args, ws)) {
# 1141 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1142 : : // Since LimitMempoolSize() won't be called, this should never fail.
# 1143 : 0 : Assume(false);
# 1144 : 0 : all_submitted = false;
# 1145 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1146 : 0 : strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
# 1147 : 0 : }
# 1148 : 14 : }
# 1149 : :
# 1150 : : // It may or may not be the case that all the transactions made it into the mempool. Regardless,
# 1151 : : // make sure we haven't exceeded max mempool size.
# 1152 : 7 : LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
# 1153 : :
# 1154 : : // Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
# 1155 : : // but don't report success unless they all made it into the mempool.
# 1156 [ + + ]: 14 : for (Workspace& ws : workspaces) {
# 1157 [ + - ]: 14 : if (m_pool.exists(GenTxid::Wtxid(ws.m_ptx->GetWitnessHash()))) {
# 1158 : 14 : results.emplace(ws.m_ptx->GetWitnessHash(),
# 1159 : 14 : MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees));
# 1160 : 14 : GetMainSignals().TransactionAddedToMempool(ws.m_ptx, m_pool.GetAndIncrementSequence());
# 1161 : 14 : } else {
# 1162 : 0 : all_submitted = false;
# 1163 : 0 : ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
# 1164 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1165 : 0 : }
# 1166 : 14 : }
# 1167 : 7 : return all_submitted;
# 1168 : 7 : }
# 1169 : :
# 1170 : : MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
# 1171 : 33307 : {
# 1172 : 33307 : AssertLockHeld(cs_main);
# 1173 : 33307 : LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
# 1174 : :
# 1175 : 33307 : Workspace ws(ptx);
# 1176 : :
# 1177 [ + + ]: 33307 : if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1178 : :
# 1179 [ + + ][ + + ]: 27141 : if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1180 : :
# 1181 : : // Perform the inexpensive checks first and avoid hashing and signature verification unless
# 1182 : : // those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
# 1183 [ + + ]: 27113 : if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1184 : :
# 1185 [ - + ]: 24876 : if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1186 : :
# 1187 : : // Tx was accepted, but not added
# 1188 [ + + ]: 24876 : if (args.m_test_accept) {
# 1189 : 3632 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
# 1190 : 3632 : }
# 1191 : :
# 1192 [ - + ]: 21244 : if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1193 : :
# 1194 : 21244 : GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
# 1195 : :
# 1196 : 21244 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
# 1197 : 21244 : }
# 1198 : :
# 1199 : : PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
# 1200 : 85 : {
# 1201 : 85 : AssertLockHeld(cs_main);
# 1202 : :
# 1203 : : // These context-free package limits can be done before taking the mempool lock.
# 1204 : 85 : PackageValidationState package_state;
# 1205 [ + + ]: 85 : if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {});
# 1206 : :
# 1207 : 81 : std::vector<Workspace> workspaces{};
# 1208 : 81 : workspaces.reserve(txns.size());
# 1209 : 81 : std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
# 1210 : 646 : [](const auto& tx) { return Workspace(tx); });
# 1211 : 81 : std::map<const uint256, const MempoolAcceptResult> results;
# 1212 : :
# 1213 : 81 : LOCK(m_pool.cs);
# 1214 : :
# 1215 : : // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
# 1216 [ + + ]: 618 : for (Workspace& ws : workspaces) {
# 1217 [ + + ]: 618 : if (!PreChecks(args, ws)) {
# 1218 : 6 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1219 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1220 : 6 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1221 : 6 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1222 : 6 : }
# 1223 : : // Make the coins created by this transaction available for subsequent transactions in the
# 1224 : : // package to spend. Since we already checked conflicts in the package and we don't allow
# 1225 : : // replacements, we don't need to track the coins spent. Note that this logic will need to be
# 1226 : : // updated if package replace-by-fee is allowed in the future.
# 1227 : 612 : assert(!args.m_allow_replacement);
# 1228 : 0 : m_viewmempool.PackageAddTransaction(ws.m_ptx);
# 1229 : 612 : }
# 1230 : :
# 1231 : : // Transactions must meet two minimum feerates: the mempool minimum fee and min relay fee.
# 1232 : : // For transactions consisting of exactly one child and its parents, it suffices to use the
# 1233 : : // package feerate (total modified fees / total virtual size) to check this requirement.
# 1234 : 75 : const auto m_total_vsize = std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
# 1235 : 609 : [](int64_t sum, auto& ws) { return sum + ws.m_vsize; });
# 1236 : 75 : const auto m_total_modified_fees = std::accumulate(workspaces.cbegin(), workspaces.cend(), CAmount{0},
# 1237 : 609 : [](CAmount sum, auto& ws) { return sum + ws.m_modified_fees; });
# 1238 : 75 : const CFeeRate package_feerate(m_total_modified_fees, m_total_vsize);
# 1239 : 75 : TxValidationState placeholder_state;
# 1240 [ + + ]: 75 : if (args.m_package_feerates &&
# 1241 [ + + ]: 75 : !CheckFeeRate(m_total_vsize, m_total_modified_fees, placeholder_state)) {
# 1242 : 6 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-fee-too-low");
# 1243 : 6 : return PackageMempoolAcceptResult(package_state, package_feerate, {});
# 1244 : 6 : }
# 1245 : :
# 1246 : : // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
# 1247 : : // because it's unnecessary. Also, CPFP carve out can increase the limit for individual
# 1248 : : // transactions, but this exemption is not extended to packages in CheckPackageLimits().
# 1249 : 69 : std::string err_string;
# 1250 [ + - ][ + + ]: 69 : if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
# 1251 : 10 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1252 : 10 : }
# 1253 : :
# 1254 [ + + ]: 541 : for (Workspace& ws : workspaces) {
# 1255 [ + + ]: 541 : if (!PolicyScriptChecks(args, ws)) {
# 1256 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1257 : 1 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1258 : 1 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1259 : 1 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1260 : 1 : }
# 1261 [ + + ]: 540 : if (args.m_test_accept) {
# 1262 : : // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
# 1263 : : // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
# 1264 : 526 : results.emplace(ws.m_ptx->GetWitnessHash(),
# 1265 : 526 : MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions),
# 1266 : 526 : ws.m_vsize, ws.m_base_fees));
# 1267 : 526 : }
# 1268 : 540 : }
# 1269 : :
# 1270 [ + + ]: 58 : if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1271 : :
# 1272 [ - + ]: 7 : if (!SubmitPackage(args, workspaces, package_state, results)) {
# 1273 : : // PackageValidationState filled in by SubmitPackage().
# 1274 : 0 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1275 : 0 : }
# 1276 : :
# 1277 : 7 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1278 : 7 : }
# 1279 : :
/**
 * Accept a child-with-unconfirmed-parents package. Performs context-free shape
 * checks, verifies the child's inputs come from the package or confirmed UTXOs,
 * de-duplicates transactions already in the mempool, tries each remaining
 * transaction individually, and only falls back to package validation
 * (AcceptMultipleTransactions) for those that failed for fee-related reasons.
 */
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, ATMPArgs& args)
{
    AssertLockHeld(cs_main);
    PackageValidationState package_state;

    // Check that the package is well-formed. If it isn't, we won't try to validate any of the
    // transactions and thus won't return any MempoolAcceptResults, just a package-wide error.

    // Context-free package checks.
    if (!CheckPackage(package, package_state)) return PackageMempoolAcceptResult(package_state, {});

    // All transactions in the package must be a parent of the last transaction. This is just an
    // opportunity for us to fail fast on a context-free check without taking the mempool lock.
    if (!IsChildWithParents(package)) {
        package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }

    // IsChildWithParents() guarantees the package is > 1 transactions.
    assert(package.size() > 1);
    // The package must be 1 child with all of its unconfirmed parents. The package is expected to
    // be sorted, so the last transaction is the child.
    const auto& child = package.back();
    // Txids of every parent in the package, for quick lookup when classifying the child's inputs.
    std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids;
    std::transform(package.cbegin(), package.cend() - 1,
                   std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
                   [](const auto& tx) { return tx->GetHash(); });

    // All child inputs must refer to a preceding package transaction or a confirmed UTXO. The only
    // way to verify this is to look up the child's inputs in our current coins view (not including
    // mempool), and enforce that all parents not present in the package be available at chain tip.
    // Since this check can bring new coins into the coins cache, keep track of these coins and
    // uncache them if we don't end up submitting this package to the mempool.
    const CCoinsViewCache& coins_tip_cache = m_active_chainstate.CoinsTip();
    for (const auto& input : child->vin) {
        // Record prevouts not already cached so the caller can evict them on failure.
        if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
            args.m_coins_to_uncache.push_back(input.prevout);
        }
    }
    // Using the MemPoolAccept m_view cache allows us to look up these same coins faster later.
    // This should be connecting directly to CoinsTip, not to m_viewmempool, because we specifically
    // require inputs to be confirmed if they aren't in the package.
    m_view.SetBackend(m_active_chainstate.CoinsTip());
    // An input is acceptable if it spends a package parent or a coin visible at the chain tip.
    const auto package_or_confirmed = [this, &unconfirmed_parent_txids](const auto& input) {
        return unconfirmed_parent_txids.count(input.prevout.hash) > 0 || m_view.HaveCoin(input.prevout);
    };
    if (!std::all_of(child->vin.cbegin(), child->vin.cend(), package_or_confirmed)) {
        package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-unconfirmed-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }
    // Disconnect the backend to protect against bugs where later code pulls more inputs from disk
    // without adding them to coins_to_uncache. The backend will be connected again when needed in
    // PreChecks.
    m_view.SetBackend(m_dummy);

    LOCK(m_pool.cs);
    std::map<const uint256, const MempoolAcceptResult> results;
    // Node operators are free to set their mempool policies however they please, nodes may receive
    // transactions in different orders, and malicious counterparties may try to take advantage of
    // policy differences to pin or delay propagation of transactions. As such, it's possible for
    // some package transaction(s) to already be in the mempool, and we don't want to reject the
    // entire package in that case (as that could be a censorship vector). De-duplicate the
    // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with
    // the new transactions. This ensures we don't double-count transaction counts and sizes when
    // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
    ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
    bool quit_early{false};
    std::vector<CTransactionRef> txns_new;
    for (const auto& tx : package) {
        const auto& wtxid = tx->GetWitnessHash();
        const auto& txid = tx->GetHash();
        // There are 3 possibilities: already in mempool, same-txid-diff-wtxid already in mempool,
        // or not in mempool. An already confirmed tx is treated as one not in mempool, because all
        // we know is that the inputs aren't available.
        if (m_pool.exists(GenTxid::Wtxid(wtxid))) {
            // Exact transaction already exists in the mempool.
            auto iter = m_pool.GetIter(txid);
            assert(iter != std::nullopt);
            results.emplace(wtxid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
        } else if (m_pool.exists(GenTxid::Txid(txid))) {
            // Transaction with the same non-witness data but different witness (same txid,
            // different wtxid) already exists in the mempool.
            //
            // We don't allow replacement transactions right now, so just swap the package
            // transaction for the mempool one. Note that we are ignoring the validity of the
            // package transaction passed in.
            // TODO: allow witness replacement in packages.
            auto iter = m_pool.GetIter(txid);
            assert(iter != std::nullopt);
            // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
            results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
        } else {
            // Transaction does not already exist in the mempool.
            // Try submitting the transaction on its own.
            const auto single_res = AcceptSingleTransaction(tx, single_args);
            if (single_res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
                // The transaction succeeded on its own and is now in the mempool. Don't include it
                // in package validation, because its fees should only be "used" once.
                assert(m_pool.exists(GenTxid::Wtxid(wtxid)));
                results.emplace(wtxid, single_res);
            } else if (single_res.m_state.GetResult() != TxValidationResult::TX_MEMPOOL_POLICY &&
                       single_res.m_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
                // Package validation policy only differs from individual policy in its evaluation
                // of feerate. For example, if a transaction fails here due to violation of a
                // consensus rule, the result will not change when it is submitted as part of a
                // package. To minimize the amount of repeated work, unless the transaction fails
                // due to feerate or missing inputs (its parent is a previous transaction in the
                // package that failed due to feerate), don't run package validation. Note that this
                // decision might not make sense if different types of packages are allowed in the
                // future. Continue individually validating the rest of the transactions, because
                // some of them may still be valid.
                quit_early = true;
            } else {
                // Failed for fee or missing-input reasons: retry below as part of the package.
                txns_new.push_back(tx);
            }
        }
    }

    // Nothing to do if the entire package has already been submitted.
    if (quit_early || txns_new.empty()) {
        // No package feerate when no package validation was done.
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }
    // Validate the (deduplicated) transactions as a package.
    auto submission_result = AcceptMultipleTransactions(txns_new, args);
    // Include already-in-mempool transaction results in the final result.
    for (const auto& [wtxid, mempoolaccept_res] : results) {
        submission_result.m_tx_results.emplace(wtxid, mempoolaccept_res);
    }
    if (submission_result.m_state.IsValid()) assert(submission_result.m_package_feerate.has_value());
    return submission_result;
}
# 1411 : :
# 1412 : : } // anon namespace
# 1413 : :
# 1414 : : MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, const CTransactionRef& tx,
# 1415 : : int64_t accept_time, bool bypass_limits, bool test_accept)
# 1416 : : EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 1417 : 33227 : {
# 1418 : 33227 : AssertLockHeld(::cs_main);
# 1419 : 33227 : const CChainParams& chainparams{active_chainstate.m_params};
# 1420 : 33227 : assert(active_chainstate.GetMempool() != nullptr);
# 1421 : 0 : CTxMemPool& pool{*active_chainstate.GetMempool()};
# 1422 : :
# 1423 : 33227 : std::vector<COutPoint> coins_to_uncache;
# 1424 : 33227 : auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, accept_time, bypass_limits, coins_to_uncache, test_accept);
# 1425 : 33227 : const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
# 1426 [ + + ]: 33227 : if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
# 1427 : : // Remove coins that were not present in the coins cache before calling
# 1428 : : // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
# 1429 : : // number of invalid transactions that attempt to overrun the in-memory coins cache
# 1430 : : // (`CCoinsViewCache::cacheCoins`).
# 1431 : :
# 1432 [ + + ]: 8407 : for (const COutPoint& hashTx : coins_to_uncache)
# 1433 : 3985 : active_chainstate.CoinsTip().Uncache(hashTx);
# 1434 : 8407 : }
# 1435 : : // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
# 1436 : 33227 : BlockValidationState state_dummy;
# 1437 : 33227 : active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
# 1438 : 33227 : return result;
# 1439 : 33227 : }
# 1440 : :
# 1441 : : PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTxMemPool& pool,
# 1442 : : const Package& package, bool test_accept)
# 1443 : 110 : {
# 1444 : 110 : AssertLockHeld(cs_main);
# 1445 : 110 : assert(!package.empty());
# 1446 : 0 : assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
# 1447 : :
# 1448 : 0 : std::vector<COutPoint> coins_to_uncache;
# 1449 : 110 : const CChainParams& chainparams = active_chainstate.m_params;
# 1450 : 110 : const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
# 1451 : 110 : AssertLockHeld(cs_main);
# 1452 [ + + ]: 110 : if (test_accept) {
# 1453 : 72 : auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
# 1454 : 72 : return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
# 1455 : 72 : } else {
# 1456 : 38 : auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache);
# 1457 : 38 : return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args);
# 1458 : 38 : }
# 1459 : 110 : }();
# 1460 : :
# 1461 : : // Uncache coins pertaining to transactions that were not submitted to the mempool.
# 1462 [ + + ][ + + ]: 110 : if (test_accept || result.m_state.IsInvalid()) {
# 1463 [ + + ]: 568 : for (const COutPoint& hashTx : coins_to_uncache) {
# 1464 : 568 : active_chainstate.CoinsTip().Uncache(hashTx);
# 1465 : 568 : }
# 1466 : 85 : }
# 1467 : : // Ensure the coins cache is still within limits.
# 1468 : 110 : BlockValidationState state_dummy;
# 1469 : 110 : active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
# 1470 : 110 : return result;
# 1471 : 110 : }
# 1472 : :
# 1473 : : CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
# 1474 : 168754 : {
# 1475 : 168754 : int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
# 1476 : : // Force block reward to zero when right shift is undefined.
# 1477 [ + + ]: 168754 : if (halvings >= 64)
# 1478 : 563 : return 0;
# 1479 : :
# 1480 : 168191 : CAmount nSubsidy = 50 * COIN;
# 1481 : : // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
# 1482 : 168191 : nSubsidy >>= halvings;
# 1483 : 168191 : return nSubsidy;
# 1484 : 168754 : }
# 1485 : :
/**
 * Construct the on-disk coins database (under the network data directory) and
 * wrap it in the error-catching view. The in-memory cache layer is created
 * separately via InitCache().
 */
CoinsViews::CoinsViews(
    fs::path ldb_name,
    size_t cache_size_bytes,
    bool in_memory,
    bool should_wipe) : m_dbview(
                            gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
                        m_catcherview(&m_dbview) {}
# 1493 : :
/**
 * Create the in-memory coins cache layered on top of the error-catching view.
 * Kept out of the constructor so the caller controls when the cache exists.
 */
void CoinsViews::InitCache()
{
    AssertLockHeld(::cs_main);
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
# 1499 : :
/**
 * Construct a chainstate.
 * @param mempool                 May be nullptr (e.g. when running without a mempool).
 * @param blockman                Shared block storage manager.
 * @param chainman                Owning chainstate manager (supplies chain params).
 * @param from_snapshot_blockhash Set when this chainstate is based on a UTXO snapshot.
 */
CChainState::CChainState(
    CTxMemPool* mempool,
    BlockManager& blockman,
    ChainstateManager& chainman,
    std::optional<uint256> from_snapshot_blockhash)
    : m_mempool(mempool),
      m_blockman(blockman),
      m_params(chainman.GetParams()),
      m_chainman(chainman),
      m_from_snapshot_blockhash(from_snapshot_blockhash) {}
# 1510 : :
/**
 * Open (or create) the coins database backing this chainstate.
 * Snapshot-based chainstates get a distinct database name, suffixed with the
 * snapshot's base block hash, so they can coexist with the validated chainstate.
 */
void CChainState::InitCoinsDB(
    size_t cache_size_bytes,
    bool in_memory,
    bool should_wipe,
    fs::path leveldb_name)
{
    if (m_from_snapshot_blockhash) {
        leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
    }

    m_coins_views = std::make_unique<CoinsViews>(
        leveldb_name, cache_size_bytes, in_memory, should_wipe);
}
# 1524 : :
/**
 * Create the in-memory coins cache and record its configured size (consulted
 * later by flush/resize logic). Requires InitCoinsDB() to have been called.
 */
void CChainState::InitCoinsCache(size_t cache_size_bytes)
{
    AssertLockHeld(::cs_main);
    assert(m_coins_views != nullptr);
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
# 1532 : :
# 1533 : : // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
# 1534 : : // is a performance-related implementation detail. This function must be marked
# 1535 : : // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
# 1536 : : // can call it.
# 1537 : : //
bool CChainState::IsInitialBlockDownload() const
{
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    // Re-check under the lock: another thread may have latched meanwhile.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;
    // Still importing/reindexing block files: definitely in IBD.
    if (fImporting || fReindex)
        return true;
    // No tip at all yet: in IBD.
    if (m_chain.Tip() == nullptr)
        return true;
    // Tip has less cumulative work than the hardcoded minimum: in IBD.
    if (m_chain.Tip()->nChainWork < nMinimumChainWork)
        return true;
    // Tip is older than the maximum allowed tip age: in IBD.
    if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    // All checks passed: latch to "not in IBD" permanently (never unlatched).
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
# 1559 : :
# 1560 : : static void AlertNotify(const std::string& strMessage)
# 1561 : 2 : {
# 1562 : 2 : uiInterface.NotifyAlertChanged();
# 1563 : 2 : #if HAVE_SYSTEM
# 1564 : 2 : std::string strCmd = gArgs.GetArg("-alertnotify", "");
# 1565 [ - + ]: 2 : if (strCmd.empty()) return;
# 1566 : :
# 1567 : : // Alert text should be plain ascii coming from a trusted source, but to
# 1568 : : // be safe we first strip anything not in safeChars, then add single quotes around
# 1569 : : // the whole string before passing it to the shell:
# 1570 : 2 : std::string singleQuote("'");
# 1571 : 2 : std::string safeStatus = SanitizeString(strMessage);
# 1572 : 2 : safeStatus = singleQuote+safeStatus+singleQuote;
# 1573 : 2 : ReplaceAll(strCmd, "%s", safeStatus);
# 1574 : :
# 1575 : 2 : std::thread t(runCommand, strCmd);
# 1576 : 2 : t.detach(); // thread runs free
# 1577 : 2 : #endif
# 1578 : 2 : }
# 1579 : :
# 1580 : : void CChainState::CheckForkWarningConditions()
# 1581 : 81242 : {
# 1582 : 81242 : AssertLockHeld(cs_main);
# 1583 : :
# 1584 : : // Before we get past initial download, we cannot reliably alert about forks
# 1585 : : // (we assume we don't get stuck on a fork before finishing our initial sync)
# 1586 [ + + ]: 81242 : if (IsInitialBlockDownload()) {
# 1587 : 7695 : return;
# 1588 : 7695 : }
# 1589 : :
# 1590 [ + + ][ + + ]: 73547 : if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
# [ + + ]
# 1591 : 23 : LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
# 1592 : 23 : SetfLargeWorkInvalidChainFound(true);
# 1593 : 73524 : } else {
# 1594 : 73524 : SetfLargeWorkInvalidChainFound(false);
# 1595 : 73524 : }
# 1596 : 73547 : }
# 1597 : :
# 1598 : : // Called both upon regular invalid block discovery *and* InvalidateBlock
void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
{
    AssertLockHeld(cs_main);
    // Track the most-work invalid block seen; consulted by CheckForkWarningConditions().
    if (!m_chainman.m_best_invalid || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
        m_chainman.m_best_invalid = pindexNew;
    }
    // If the best-known header descends from (or is) the invalid block, fall
    // back to the active chain tip as the best header.
    if (m_chainman.m_best_header != nullptr && m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) == pindexNew) {
        m_chainman.m_best_header = m_chain.Tip();
    }

    // Log both the invalid block and the current best tip for diagnostics.
    LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
              pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
              log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = m_chain.Tip();
    assert (tip);
    LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
              tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
              FormatISO8601DateTime(tip->GetBlockTime()));
    CheckForkWarningConditions();
}
# 1619 : :
# 1620 : : // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
# 1621 : : // which does its own setBlockIndexCandidates management.
# 1622 : : void CChainState::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state)
# 1623 : 2664 : {
# 1624 : 2664 : AssertLockHeld(cs_main);
# 1625 [ + - ]: 2664 : if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 1626 : 2664 : pindex->nStatus |= BLOCK_FAILED_VALID;
# 1627 : 2664 : m_chainman.m_failed_blocks.insert(pindex);
# 1628 : 2664 : m_blockman.m_dirty_blockindex.insert(pindex);
# 1629 : 2664 : setBlockIndexCandidates.erase(pindex);
# 1630 : 2664 : InvalidChainFound(pindex);
# 1631 : 2664 : }
# 1632 : 2664 : }
# 1633 : :
# 1634 : : void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
# 1635 : 253887 : {
# 1636 : : // mark inputs spent
# 1637 [ + + ]: 253887 : if (!tx.IsCoinBase()) {
# 1638 : 125066 : txundo.vprevout.reserve(tx.vin.size());
# 1639 [ + + ]: 160924 : for (const CTxIn &txin : tx.vin) {
# 1640 : 160924 : txundo.vprevout.emplace_back();
# 1641 : 160924 : bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
# 1642 : 160924 : assert(is_spent);
# 1643 : 160924 : }
# 1644 : 125066 : }
# 1645 : : // add outputs
# 1646 : 253887 : AddCoins(inputs, tx, nHeight);
# 1647 : 253887 : }
# 1648 : :
// Execute this queued script check: verify the input's scriptSig and witness
// against the spent output's scriptPubKey with the configured flags, using the
// signature cache. Returns true on success; on failure VerifyScript stores the
// script error in `error`.
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
    return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
# 1654 : :
//! Cache of successful script verifications, keyed by a salted hash of
//! (witness txid, validation flags); guarded by cs_main (see CheckInputScripts()).
static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
//! SHA256 state pre-salted once at startup (InitScriptExecutionCache()) and
//! copied per lookup to derive cache keys.
static CSHA256 g_scriptExecutionCacheHasher;
# 1657 : :
# 1658 : : bool InitScriptExecutionCache(size_t max_size_bytes)
# 1659 : 1667 : {
# 1660 : : // Setup the salted hasher
# 1661 : 1667 : uint256 nonce = GetRandHash();
# 1662 : : // We want the nonce to be 64 bytes long to force the hasher to process
# 1663 : : // this chunk, which makes later hash computations more efficient. We
# 1664 : : // just write our 32-byte entropy twice to fill the 64 bytes.
# 1665 : 1667 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
# 1666 : 1667 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
# 1667 : :
# 1668 : 1667 : auto setup_results = g_scriptExecutionCache.setup_bytes(max_size_bytes);
# 1669 [ - + ]: 1667 : if (!setup_results) return false;
# 1670 : :
# 1671 : 1667 : const auto [num_elems, approx_size_bytes] = *setup_results;
# 1672 : 1667 : LogPrintf("Using %zu MiB out of %zu MiB requested for script execution cache, able to store %zu elements\n",
# 1673 : 1667 : approx_size_bytes >> 20, max_size_bytes >> 20, num_elems);
# 1674 : 1667 : return true;
# 1675 : 1667 : }
# 1676 : :
# 1677 : : /**
# 1678 : : * Check whether all of this transaction's input scripts succeed.
# 1679 : : *
# 1680 : : * This involves ECDSA signature checks so can be computationally intensive. This function should
# 1681 : : * only be called after the cheap sanity checks in CheckTxInputs passed.
# 1682 : : *
# 1683 : : * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
# 1684 : : * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
# 1685 : : * not pushed onto pvChecks/run.
# 1686 : : *
# 1687 : : * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
# 1688 : : * which are matched. This is useful for checking blocks where we will likely never need the cache
# 1689 : : * entry again.
# 1690 : : *
# 1691 : : * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking
# 1692 : : * callers should probably reset it to CONSENSUS in such cases.
# 1693 : : *
# 1694 : : * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
# 1695 : : */
# 1696 : : bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
# 1697 : : const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
# 1698 : : bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
# 1699 : : std::vector<CScriptCheck>* pvChecks)
# 1700 : 393467 : {
# : : // Coinbase transactions have no previous outputs, so there are no input scripts to run.
# 1701 [ - + ]: 393467 : if (tx.IsCoinBase()) return true;
# 1702 : :
# 1703 [ + + ]: 393467 : if (pvChecks) {
# 1704 : 195763 : pvChecks->reserve(tx.vin.size());
# 1705 : 195763 : }
# 1706 : :
# 1707 : : // First check if script executions have been cached with the same
# 1708 : : // flags. Note that this assumes that the inputs provided are
# 1709 : : // correct (ie that the transaction hash which is in tx's prevouts
# 1710 : : // properly commits to the scriptPubKey in the inputs view of that
# 1711 : : // transaction).
# 1712 : 393467 : uint256 hashCacheEntry;
# 1713 : 393467 : CSHA256 hasher = g_scriptExecutionCacheHasher;
# : : // The cache key commits to both the wtxid and the exact flag set, so a hit
# : : // is only valid for an identical set of validation rules.
# 1714 : 393467 : hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
# 1715 : 393467 : AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
# 1716 [ + + ]: 393467 : if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
# 1717 : 98640 : return true;
# 1718 : 98640 : }
# 1719 : :
# : : // Lazily gather the outputs being spent so PrecomputedTransactionData can
# : : // pre-hash the sighash midstates once for all inputs.
# 1720 [ + + ]: 294827 : if (!txdata.m_spent_outputs_ready) {
# 1721 : 61553 : std::vector<CTxOut> spent_outputs;
# 1722 : 61553 : spent_outputs.reserve(tx.vin.size());
# 1723 : :
# 1724 [ + + ]: 110565 : for (const auto& txin : tx.vin) {
# 1725 : 110565 : const COutPoint& prevout = txin.prevout;
# 1726 : 110565 : const Coin& coin = inputs.AccessCoin(prevout);
# 1727 : 110565 : assert(!coin.IsSpent());
# 1728 : 0 : spent_outputs.emplace_back(coin.out);
# 1729 : 110565 : }
# 1730 : 61553 : txdata.Init(tx, std::move(spent_outputs));
# 1731 : 61553 : }
# : : // Invariant: every input must have its spent output available before checks run.
# 1732 : 294827 : assert(txdata.m_spent_outputs.size() == tx.vin.size());
# 1733 : :
# 1734 [ + + ]: 595647 : for (unsigned int i = 0; i < tx.vin.size(); i++) {
# 1735 : :
# 1736 : : // We very carefully only pass in things to CScriptCheck which
# 1737 : : // are clearly committed to by tx' witness hash. This provides
# 1738 : : // a sanity check that our caching is not introducing consensus
# 1739 : : // failures through additional data in, eg, the coins being
# 1740 : : // spent being checked as a part of CScriptCheck.
# 1741 : :
# 1742 : : // Verify signature
# 1743 : 375741 : CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
# 1744 [ + + ]: 375741 : if (pvChecks) {
# 1745 : 113402 : pvChecks->push_back(CScriptCheck());
# 1746 : 113402 : check.swap(pvChecks->back());
# 1747 [ + + ]: 262339 : } else if (!check()) {
# 1748 [ + - ]: 74921 : if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
# 1749 : : // Check whether the failure was caused by a
# 1750 : : // non-mandatory script verification check, such as
# 1751 : : // non-standard DER encodings or non-null dummy
# 1752 : : // arguments; if so, ensure we return NOT_STANDARD
# 1753 : : // instead of CONSENSUS to avoid downstream users
# 1754 : : // splitting the network between upgraded and
# 1755 : : // non-upgraded nodes by banning CONSENSUS-failing
# 1756 : : // data providers.
# 1757 : 74921 : CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
# 1758 : 74921 : flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
# : : // check2 passing means only the extra (standardness) flags caused the failure.
# 1759 [ + + ]: 74921 : if (check2())
# 1760 : 57194 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
# 1761 : 74921 : }
# 1762 : : // MANDATORY flag failures correspond to
# 1763 : : // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
# 1764 : : // failures are the most serious case of validation
# 1765 : : // failures, we may need to consider using
# 1766 : : // RECENT_CONSENSUS_CHANGE for any script failure that
# 1767 : : // could be due to non-upgraded nodes which we may want to
# 1768 : : // support, to avoid splitting the network (but this
# 1769 : : // depends on the details of how net_processing handles
# 1770 : : // such errors).
# 1771 : 17727 : return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
# 1772 : 74921 : }
# 1773 : 375741 : }
# 1774 : :
# 1775 [ + + ][ + + ]: 219906 : if (cacheFullScriptStore && !pvChecks) {
# 1776 : : // We executed all of the provided scripts, and were told to
# 1777 : : // cache the result. Do so now.
# 1778 : 88260 : g_scriptExecutionCache.insert(hashCacheEntry);
# 1779 : 88260 : }
# 1780 : :
# 1781 : 219906 : return true;
# 1782 : 294827 : }
# 1783 : :
# 1784 : : bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
# 1785 : 1 : {
# : : // Forward to the message-only AbortNode overload (shutdown/user notification),
# : : // then record the error on the validation state so the caller propagates failure.
# 1786 : 1 : AbortNode(strMessage, userMessage);
# 1787 : 1 : return state.Error(strMessage);
# 1788 : 1 : }
# 1789 : :
# 1790 : : /**
# 1791 : : * Restore the UTXO in a Coin at a given COutPoint
# 1792 : : * @param undo The Coin to be restored.
# 1793 : : * @param view The coins view to which to apply the changes.
# 1794 : : * @param out The out point that corresponds to the tx input.
# 1795 : : * @return A DisconnectResult as an int
# 1796 : : */
# 1797 : : int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
# 1798 : 19796 : {
# : : // fClean tracks whether the undo exactly matched expectations; a mismatch
# : : // downgrades the result to DISCONNECT_UNCLEAN rather than failing outright.
# 1799 : 19796 : bool fClean = true;
# 1800 : :
# 1801 [ + + ]: 19796 : if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
# 1802 : :
# 1803 [ - + ]: 19796 : if (undo.nHeight == 0) {
# 1804 : : // Missing undo metadata (height and coinbase). Older versions included this
# 1805 : : // information only in undo records for the last spend of a transactions'
# 1806 : : // outputs. This implies that it must be present for some other output of the same tx.
# 1807 : 0 : const Coin& alternate = AccessByTxid(view, out.hash);
# 1808 [ # # ]: 0 : if (!alternate.IsSpent()) {
# 1809 : 0 : undo.nHeight = alternate.nHeight;
# 1810 : 0 : undo.fCoinBase = alternate.fCoinBase;
# 1811 : 0 : } else {
# 1812 : 0 : return DISCONNECT_FAILED; // adding output for transaction without known metadata
# 1813 : 0 : }
# 1814 : 0 : }
# 1815 : : // If the coin already exists as an unspent coin in the cache, then the
# 1816 : : // possible_overwrite parameter to AddCoin must be set to true. We have
# 1817 : : // already checked whether an unspent coin exists above using HaveCoin, so
# 1818 : : // we don't need to guess. When fClean is false, an unspent coin already
# 1819 : : // existed and it is an overwrite.
# 1820 : 19796 : view.AddCoin(out, std::move(undo), !fClean);
# 1821 : :
# 1822 [ + + ]: 19796 : return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
# 1823 : 19796 : }
# 1824 : :
# 1825 : : /** Undo the effects of this block (with given index) on the UTXO set represented by coins.
# 1826 : : * When FAILED is returned, view is left in an indeterminate state. */
# 1827 : : DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
# 1828 : 12439 : {
# 1829 : 12439 : AssertLockHeld(::cs_main);
# 1830 : 12439 : bool fClean = true;
# 1831 : :
# 1832 : 12439 : CBlockUndo blockUndo;
# 1833 [ + + ]: 12439 : if (!UndoReadFromDisk(blockUndo, pindex)) {
# 1834 : 1 : error("DisconnectBlock(): failure reading undo data");
# 1835 : 1 : return DISCONNECT_FAILED;
# 1836 : 1 : }
# 1837 : :
# : : // One undo record exists per non-coinbase transaction, so the counts must
# : : // satisfy vtxundo + 1 (coinbase) == vtx.
# 1838 [ - + ]: 12438 : if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
# 1839 : 0 : error("DisconnectBlock(): block and undo data inconsistent");
# 1840 : 0 : return DISCONNECT_FAILED;
# 1841 : 0 : }
# 1842 : :
# 1843 : : // undo transactions in reverse order
# 1844 [ + + ]: 33447 : for (int i = block.vtx.size() - 1; i >= 0; i--) {
# 1845 : 21009 : const CTransaction &tx = *(block.vtx[i]);
# 1846 : 21009 : uint256 hash = tx.GetHash();
# 1847 : 21009 : bool is_coinbase = tx.IsCoinBase();
# 1848 : :
# 1849 : : // Check that all outputs are available and match the outputs in the block itself
# 1850 : : // exactly.
# : : // Unspendable outputs are expected never to have been in the UTXO set, so they are skipped.
# 1851 [ + + ]: 59685 : for (size_t o = 0; o < tx.vout.size(); o++) {
# 1852 [ + + ]: 38676 : if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
# 1853 : 27323 : COutPoint out(hash, o);
# 1854 : 27323 : Coin coin;
# 1855 : 27323 : bool is_spent = view.SpendCoin(out, &coin);
# 1856 [ - + ][ - + ]: 27323 : if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
# [ - + ][ - + ]
# 1857 : 0 : fClean = false; // transaction output mismatch
# 1858 : 0 : }
# 1859 : 27323 : }
# 1860 : 38676 : }
# 1861 : :
# 1862 : : // restore inputs
# 1863 [ + + ]: 21009 : if (i > 0) { // not coinbases
# 1864 : 8571 : CTxUndo &txundo = blockUndo.vtxundo[i-1];
# 1865 [ - + ]: 8571 : if (txundo.vprevout.size() != tx.vin.size()) {
# 1866 : 0 : error("DisconnectBlock(): transaction and undo data inconsistent");
# 1867 : 0 : return DISCONNECT_FAILED;
# 1868 : 0 : }
# : : // Walk inputs in reverse so the UTXO set is restored in exact reverse spend order.
# 1869 [ + + ]: 24867 : for (unsigned int j = tx.vin.size(); j > 0;) {
# 1870 : 16296 : --j;
# 1871 : 16296 : const COutPoint& out = tx.vin[j].prevout;
# 1872 : 16296 : int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
# 1873 [ - + ]: 16296 : if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
# 1874 [ + - ][ + - ]: 16296 : fClean = fClean && res != DISCONNECT_UNCLEAN;
# 1875 : 16296 : }
# 1876 : : // At this point, all of txundo.vprevout should have been moved out.
# 1877 : 8571 : }
# 1878 : 21009 : }
# 1879 : :
# 1880 : : // move best block pointer to prevout block
# 1881 : 12438 : view.SetBestBlock(pindex->pprev->GetBlockHash());
# 1882 : :
# 1883 [ + - ]: 12438 : return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
# 1884 : 12438 : }
# 1885 : :
# : : // Global queue on which block script checks are run by the worker threads
# : : // started below. NOTE(review): 128 is presumably the per-worker batch size —
# : : // confirm against the CCheckQueue constructor.
# 1886 : : static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
# 1887 : :
# 1888 : : void StartScriptCheckWorkerThreads(int threads_num)
# 1889 : 1021 : {
# : : // Launch threads_num worker threads that service the global script-check queue.
# 1890 : 1021 : scriptcheckqueue.StartWorkerThreads(threads_num);
# 1891 : 1021 : }
# 1892 : :
# 1893 : : void StopScriptCheckWorkerThreads()
# 1894 : 1029 : {
# : : // Shut down the worker threads servicing the global script-check queue.
# 1895 : 1029 : scriptcheckqueue.StopWorkerThreads();
# 1896 : 1029 : }
# 1897 : :
# 1898 : : /**
# 1899 : : * Threshold condition checker that triggers when unknown versionbits are seen on the network.
# 1900 : : */
# 1901 : : class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
# 1902 : : {
# 1903 : : private:
# 1904 : : const ChainstateManager& m_chainman;
# 1905 : : int m_bit;
# 1906 : :
# 1907 : : public:
# 1908 : 2403056 : explicit WarningBitsConditionChecker(const ChainstateManager& chainman, int bit) : m_chainman{chainman}, m_bit(bit) {}
# 1909 : :
# : : // The checker is always active: the deployment window spans [0, int64 max).
# 1910 : 2403056 : int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
# 1911 : 2403056 : int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
# 1912 : 2403056 : int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
# 1913 : 2403056 : int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
# 1914 : :
# : : // True when a block signals bit m_bit (with the versionbits top mask set) but
# : : // our own ComputeBlockVersion would not set that bit — i.e. a deployment this
# : : // software does not know about.
# 1915 : : bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
# 1916 : 1277568 : {
# 1917 [ + - ]: 1277568 : return pindex->nHeight >= params.MinBIP9WarningHeight &&
# 1918 [ + + ]: 1277568 : ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
# 1919 [ + + ]: 1277568 : ((pindex->nVersion >> m_bit) & 1) != 0 &&
# 1920 [ + + ]: 1277568 : ((m_chainman.m_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> m_bit) & 1) == 0;
# 1921 : 1277568 : }
# 1922 : : };
# 1923 : :
# : : // Per-version-bit threshold-state caches used by WarningBitsConditionChecker;
# : : // guarded by cs_main.
# 1924 : : static std::array<ThresholdConditionCache, VERSIONBITS_NUM_BITS> warningcache GUARDED_BY(cs_main);
# 1925 : :
# 1926 : : static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman)
# 1927 : 145175 : {
# : : // Assemble the script-verification flag set that consensus requires for this block.
# 1928 : 145175 : const Consensus::Params& consensusparams = chainman.GetConsensus();
# 1929 : :
# 1930 : : // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
# 1931 : : // retroactively applied to testnet)
# 1932 : : // However, only one historical block violated the P2SH rules (on both
# 1933 : : // mainnet and testnet).
# 1934 : : // Similarly, only one historical block violated the TAPROOT rules on
# 1935 : : // mainnet.
# 1936 : : // For simplicity, always leave P2SH+WITNESS+TAPROOT on except for the two
# 1937 : : // violating blocks.
# 1938 : 145175 : uint32_t flags{SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_TAPROOT};
# : : // A per-block exception replaces (does not augment) the default flag set.
# 1939 : 145175 : const auto it{consensusparams.script_flag_exceptions.find(*Assert(block_index.phashBlock))};
# 1940 [ - + ]: 145175 : if (it != consensusparams.script_flag_exceptions.end()) {
# 1941 : 0 : flags = it->second;
# 1942 : 0 : }
# 1943 : :
# 1944 : : // Enforce the DERSIG (BIP66) rule
# 1945 [ + + ]: 145175 : if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_DERSIG)) {
# 1946 : 143894 : flags |= SCRIPT_VERIFY_DERSIG;
# 1947 : 143894 : }
# 1948 : :
# 1949 : : // Enforce CHECKLOCKTIMEVERIFY (BIP65)
# 1950 [ + + ]: 145175 : if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CLTV)) {
# 1951 : 144697 : flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
# 1952 : 144697 : }
# 1953 : :
# 1954 : : // Enforce CHECKSEQUENCEVERIFY (BIP112)
# 1955 [ + + ]: 145175 : if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CSV)) {
# 1956 : 143423 : flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
# 1957 : 143423 : }
# 1958 : :
# 1959 : : // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit)
# 1960 [ + + ]: 145175 : if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_SEGWIT)) {
# 1961 : 142742 : flags |= SCRIPT_VERIFY_NULLDUMMY;
# 1962 : 142742 : }
# 1963 : :
# 1964 : 145175 : return flags;
# 1965 : 145175 : }
# 1966 : :
# 1967 : :
# : : // Cumulative per-phase benchmark accumulators (microseconds) for block
# : : // connection, reported via BCLog::BENCH log lines in ConnectBlock and friends.
# 1968 : : static int64_t nTimeCheck = 0;
# 1969 : : static int64_t nTimeForks = 0;
# 1970 : : static int64_t nTimeConnect = 0;
# 1971 : : static int64_t nTimeVerify = 0;
# 1972 : : static int64_t nTimeUndo = 0;
# 1973 : : static int64_t nTimeIndex = 0;
# 1974 : : static int64_t nTimeTotal = 0;
# : : // Count of blocks connected, used to compute per-block averages in the bench logs.
# 1975 : : static int64_t nBlocksTotal = 0;
# 1976 : :
# 1977 : : /** Apply the effects of this block (with given index) on the UTXO set represented by coins.
# 1978 : : * Validity checks that depend on the UTXO set are also done; ConnectBlock()
# 1979 : : * can fail if those validity checks fail (among other reasons). */
# 1980 : : bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
# 1981 : : CCoinsViewCache& view, bool fJustCheck)
# 1982 : 120772 : {
# 1983 : 120772 : AssertLockHeld(cs_main);
# 1984 : 120772 : assert(pindex);
# 1985 : :
# 1986 : 0 : uint256 block_hash{block.GetHash()};
# 1987 : 120772 : assert(*pindex->phashBlock == block_hash);
# 1988 : :
# 1989 : 0 : int64_t nTimeStart = GetTimeMicros();
# 1990 : :
# 1991 : : // Check it again in case a previous version let a bad block in
# 1992 : : // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
# 1993 : : // ContextualCheckBlockHeader() here. This means that if we add a new
# 1994 : : // consensus rule that is enforced in one of those two functions, then we
# 1995 : : // may have let in a block that violates the rule prior to updating the
# 1996 : : // software, and we would NOT be enforcing the rule here. Fully solving
# 1997 : : // upgrade from one software version to the next after a consensus rule
# 1998 : : // change is potentially tricky and issue-specific (see NeedsRedownload()
# 1999 : : // for one approach that was used for BIP 141 deployment).
# 2000 : : // Also, currently the rule against blocks more than 2 hours in the future
# 2001 : : // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
# 2002 : : // re-enforce that rule here (at least until we make it impossible for
# 2003 : : // m_adjusted_time_callback() to go backward).
# 2004 [ - + ]: 120772 : if (!CheckBlock(block, state, m_params.GetConsensus(), !fJustCheck, !fJustCheck)) {
# 2005 [ # # ]: 0 : if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
# 2006 : : // We don't write down blocks to disk if they may have been
# 2007 : : // corrupted, so this should be impossible unless we're having hardware
# 2008 : : // problems.
# 2009 : 0 : return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
# 2010 : 0 : }
# 2011 : 0 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 2012 : 0 : }
# 2013 : :
# 2014 : : // verify that the view's current state corresponds to the previous block
# 2015 [ + + ]: 120772 : uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
# 2016 : 120772 : assert(hashPrevBlock == view.GetBestBlock());
# 2017 : :
# 2018 : 0 : nBlocksTotal++;
# 2019 : :
# 2020 : : // Special case for the genesis block, skipping connection of its transactions
# 2021 : : // (its coinbase is unspendable)
# 2022 [ + + ]: 120772 : if (block_hash == m_params.GetConsensus().hashGenesisBlock) {
# 2023 [ + - ]: 486 : if (!fJustCheck)
# 2024 : 486 : view.SetBestBlock(pindex->GetBlockHash());
# 2025 : 486 : return true;
# 2026 : 486 : }
# 2027 : :
# 2028 : 120286 : bool fScriptChecks = true;
# 2029 [ + + ]: 120286 : if (!hashAssumeValid.IsNull()) {
# 2030 : : // We've been configured with the hash of a block which has been externally verified to have a valid history.
# 2031 : : // A suitable default value is included with the software and updated from time to time. Because validity
# 2032 : : // relative to a piece of software is an objective fact these defaults can be easily reviewed.
# 2033 : : // This setting doesn't force the selection of any particular chain but makes validating some faster by
# 2034 : : // effectively caching the result of part of the verification.
# 2035 : 2570 : BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
# 2036 [ + + ]: 2570 : if (it != m_blockman.m_block_index.end()) {
# 2037 [ + + ]: 2304 : if (it->second.GetAncestor(pindex->nHeight) == pindex &&
# 2038 [ + - ]: 2304 : m_chainman.m_best_header->GetAncestor(pindex->nHeight) == pindex &&
# 2039 [ + - ]: 2304 : m_chainman.m_best_header->nChainWork >= nMinimumChainWork) {
# 2040 : : // This block is a member of the assumed verified chain and an ancestor of the best header.
# 2041 : : // Script verification is skipped when connecting blocks under the
# 2042 : : // assumevalid block. Assuming the assumevalid block is valid this
# 2043 : : // is safe because block merkle hashes are still computed and checked,
# 2044 : : // Of course, if an assumed valid block is invalid due to false scriptSigs
# 2045 : : // this optimization would allow an invalid chain to be accepted.
# 2046 : : // The equivalent time check discourages hash power from extorting the network via DOS attack
# 2047 : : // into accepting an invalid block through telling users they must manually set assumevalid.
# 2048 : : // Requiring a software change or burying the invalid block, regardless of the setting, makes
# 2049 : : // it hard to hide the implication of the demand. This also avoids having release candidates
# 2050 : : // that are hardly doing any signature verification at all in testing without having to
# 2051 : : // artificially set the default assumed verified block further back.
# 2052 : : // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
# 2053 : : // least as good as the expected chain.
# 2054 : 204 : fScriptChecks = (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
# 2055 : 204 : }
# 2056 : 2304 : }
# 2057 : 2570 : }
# 2058 : :
# 2059 : 120286 : int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
# 2060 [ + - ]: 120286 : LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
# 2061 : :
# 2062 : : // Do not allow blocks that contain transactions which 'overwrite' older transactions,
# 2063 : : // unless those are already completely spent.
# 2064 : : // If such overwrites are allowed, coinbases and transactions depending upon those
# 2065 : : // can be duplicated to remove the ability to spend the first instance -- even after
# 2066 : : // being sent to another address.
# 2067 : : // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
# 2068 : : // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
# 2069 : : // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
# 2070 : : // two in the chain that violate it. This prevents exploiting the issue against nodes during their
# 2071 : : // initial block download.
# 2072 [ - + ][ # # ]: 120286 : bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
# 2073 [ - + ][ # # ]: 120286 : (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
# 2074 : :
# 2075 : : // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
# 2076 : : // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
# 2077 : : // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
# 2078 : : // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
# 2079 : : // duplicate transactions descending from the known pairs either.
# 2080 : : // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
# 2081 : :
# 2082 : : // BIP34 requires that a block at height X (block X) has its coinbase
# 2083 : : // scriptSig start with a CScriptNum of X (indicated height X). The above
# 2084 : : // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
# 2085 : : // case that there is a block X before the BIP34 height of 227,931 which has
# 2086 : : // an indicated height Y where Y is greater than X. The coinbase for block
# 2087 : : // X would also be a valid coinbase for block Y, which could be a BIP30
# 2088 : : // violation. An exhaustive search of all mainnet coinbases before the
# 2089 : : // BIP34 height which have an indicated height greater than the block height
# 2090 : : // reveals many occurrences. The 3 lowest indicated heights found are
# 2091 : : // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
# 2092 : : // heights would be the first opportunity for BIP30 to be violated.
# 2093 : :
# 2094 : : // The search reveals a great many blocks which have an indicated height
# 2095 : : // greater than 1,983,702, so we simply remove the optimization to skip
# 2096 : : // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
# 2097 : : // that block in another 25 years or so, we should take advantage of a
# 2098 : : // future consensus change to do a new and improved version of BIP34 that
# 2099 : : // will actually prevent ever creating any duplicate coinbases in the
# 2100 : : // future.
# 2101 : 120286 : static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
# 2102 : :
# 2103 : : // There is no potential to create a duplicate coinbase at block 209,921
# 2104 : : // because this is still before the BIP34 height and so explicit BIP30
# 2105 : : // checking is still active.
# 2106 : :
# 2107 : : // The final case is block 176,684 which has an indicated height of
# 2108 : : // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
# 2109 : : // before block 490,897 so there was not much opportunity to address this
# 2110 : : // case other than to carefully analyze it and determine it would not be a
# 2111 : : // problem. Block 490,897 was, in fact, mined with a different coinbase than
# 2112 : : // block 176,684, but it is important to note that even if it hadn't been or
# 2113 : : // is remined on an alternate fork with a duplicate coinbase, we would still
# 2114 : : // not run into a BIP30 violation. This is because the coinbase for 176,684
# 2115 : : // is spent in block 185,956 in transaction
# 2116 : : // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
# 2117 : : // spending transaction can't be duplicated because it also spends coinbase
# 2118 : : // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
# 2119 : : // coinbase has an indicated height of over 4.2 billion, and wouldn't be
# 2120 : : // duplicatable until that height, and it's currently impossible to create a
# 2121 : : // chain that long. Nevertheless we may wish to consider a future soft fork
# 2122 : : // which retroactively prevents block 490,897 from creating a duplicate
# 2123 : : // coinbase. The two historical BIP30 violations often provide a confusing
# 2124 : : // edge case when manipulating the UTXO and it would be simpler not to have
# 2125 : : // another edge case to deal with.
# 2126 : :
# 2127 : : // testnet3 has no blocks before the BIP34 height with indicated heights
# 2128 : : // post BIP34 before approximately height 486,000,000. After block
# 2129 : : // 1,983,702 testnet3 starts doing unnecessary BIP30 checking again.
# 2130 : 120286 : assert(pindex->pprev);
# 2131 : 0 : CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(m_params.GetConsensus().BIP34Height);
# 2132 : : //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
# 2133 [ + - ][ + + ]: 120286 : fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == m_params.GetConsensus().BIP34Hash));
# [ + - ]
# 2134 : :
# 2135 : : // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
# 2136 : : // consensus change that ensures coinbases at those heights cannot
# 2137 : : // duplicate earlier coinbases.
# 2138 [ + - ][ # # ]: 120286 : if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
# 2139 [ + + ]: 181172 : for (const auto& tx : block.vtx) {
# 2140 [ + + ]: 694922 : for (size_t o = 0; o < tx->vout.size(); o++) {
# 2141 [ + + ]: 513751 : if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
# 2142 : 1 : LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
# 2143 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
# 2144 : 1 : }
# 2145 : 513751 : }
# 2146 : 181172 : }
# 2147 : 120286 : }
# 2148 : :
# 2149 : : // Enforce BIP68 (sequence locks)
# 2150 : 120285 : int nLockTimeFlags = 0;
# 2151 [ + + ]: 120285 : if (DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_CSV)) {
# 2152 : 118771 : nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
# 2153 : 118771 : }
# 2154 : :
# 2155 : : // Get the script flags for this block
# 2156 : 120285 : unsigned int flags{GetBlockScriptFlags(*pindex, m_chainman)};
# 2157 : :
# 2158 : 120285 : int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
# 2159 [ + - ]: 120285 : LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
# 2160 : :
# 2161 : 120285 : CBlockUndo blockundo;
# 2162 : :
# 2163 : : // Precomputed transaction data pointers must not be invalidated
# 2164 : : // until after `control` has run the script checks (potentially
# 2165 : : // in multiple threads). Preallocate the vector size so a new allocation
# 2166 : : // doesn't invalidate pointers into the vector, and keep txsdata in scope
# 2167 : : // for as long as `control`.
# 2168 [ + + ][ + + ]: 120285 : CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
# 2169 : 120285 : std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
# 2170 : :
# 2171 : 120285 : std::vector<int> prevheights;
# 2172 : 120285 : CAmount nFees = 0;
# 2173 : 120285 : int nInputs = 0;
# 2174 : 120285 : int64_t nSigOpsCost = 0;
# 2175 : 120285 : blockundo.vtxundo.reserve(block.vtx.size() - 1);
# 2176 [ + + ]: 298802 : for (unsigned int i = 0; i < block.vtx.size(); i++)
# 2177 : 181167 : {
# 2178 : 181167 : const CTransaction &tx = *(block.vtx[i]);
# 2179 : :
# 2180 : 181167 : nInputs += tx.vin.size();
# 2181 : :
# 2182 [ + + ]: 181167 : if (!tx.IsCoinBase())
# 2183 : 60882 : {
# 2184 : 60882 : CAmount txfee = 0;
# 2185 : 60882 : TxValidationState tx_state;
# 2186 [ + + ]: 60882 : if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
# 2187 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2188 : 30 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2189 : 30 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2190 : 30 : return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
# 2191 : 30 : }
# 2192 : 60852 : nFees += txfee;
# 2193 [ - + ]: 60852 : if (!MoneyRange(nFees)) {
# 2194 : 0 : LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
# 2195 : 0 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
# 2196 : 0 : }
# 2197 : :
# 2198 : : // Check that transaction is BIP68 final
# 2199 : : // BIP68 lock checks (as opposed to nLockTime checks) must
# 2200 : : // be in ConnectBlock because they require the UTXO set
# 2201 : 60852 : prevheights.resize(tx.vin.size());
# 2202 [ + + ]: 161248 : for (size_t j = 0; j < tx.vin.size(); j++) {
# 2203 : 100396 : prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
# 2204 : 100396 : }
# 2205 : :
# 2206 [ + + ]: 60852 : if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
# 2207 : 12 : LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
# 2208 : 12 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
# 2209 : 12 : }
# 2210 : 60852 : }
# 2211 : :
# 2212 : : // GetTransactionSigOpCost counts 3 types of sigops:
# 2213 : : // * legacy (always)
# 2214 : : // * p2sh (when P2SH enabled in flags and excludes coinbase)
# 2215 : : // * witness (when witness enabled in flags and excludes coinbase)
# 2216 : 181125 : nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
# 2217 [ + + ]: 181125 : if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
# 2218 : 4 : LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
# 2219 : 4 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
# 2220 : 4 : }
# 2221 : :
# 2222 [ + + ]: 181121 : if (!tx.IsCoinBase())
# 2223 : 60836 : {
# 2224 : 60836 : std::vector<CScriptCheck> vChecks;
# 2225 : 60836 : bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
# 2226 : 60836 : TxValidationState tx_state;
# 2227 [ + + ][ + + ]: 60836 : if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
# [ + + ]
# 2228 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2229 : 2604 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2230 : 2604 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2231 : 2604 : return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
# 2232 : 2604 : tx.GetHash().ToString(), state.ToString());
# 2233 : 2604 : }
# 2234 : 58232 : control.Add(vChecks);
# 2235 : 58232 : }
# 2236 : :
# 2237 : 178517 : CTxUndo undoDummy;
# 2238 [ + + ]: 178517 : if (i > 0) {
# 2239 : 58232 : blockundo.vtxundo.push_back(CTxUndo());
# 2240 : 58232 : }
# 2241 [ + + ]: 178517 : UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
# 2242 : 178517 : }
# 2243 : 117635 : int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
# 2244 [ + - ][ + + ]: 117635 : LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
# 2245 : :
# 2246 : 117635 : CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, m_params.GetConsensus());
# 2247 [ + + ]: 117635 : if (block.vtx[0]->GetValueOut() > blockReward) {
# 2248 : 5 : LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
# 2249 : 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
# 2250 : 5 : }
# 2251 : :
# 2252 [ + + ]: 117630 : if (!control.Wait()) {
# 2253 : 16 : LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
# 2254 : 16 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
# 2255 : 16 : }
# 2256 : 117614 : int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
# 2257 [ + - ][ + + ]: 117614 : LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
# 2258 : :
# 2259 [ + + ]: 117614 : if (fJustCheck)
# 2260 : 35945 : return true;
# 2261 : :
# 2262 [ - + ]: 81669 : if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
# 2263 : 0 : return false;
# 2264 : 0 : }
# 2265 : :
# 2266 : 81669 : int64_t nTime5 = GetTimeMicros(); nTimeUndo += nTime5 - nTime4;
# 2267 [ + - ]: 81669 : LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeUndo * MICRO, nTimeUndo * MILLI / nBlocksTotal);
# 2268 : :
# 2269 [ + + ]: 81669 : if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
# 2270 : 80012 : pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
# 2271 : 80012 : m_blockman.m_dirty_blockindex.insert(pindex);
# 2272 : 80012 : }
# 2273 : :
# 2274 : : // add this block to the view's block chain
# 2275 : 81669 : view.SetBestBlock(pindex->GetBlockHash());
# 2276 : :
# 2277 : 81669 : int64_t nTime6 = GetTimeMicros(); nTimeIndex += nTime6 - nTime5;
# 2278 [ + - ]: 81669 : LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
# 2279 : :
# 2280 : 81669 : TRACE6(validation, block_connected,
# 2281 : 81669 : block_hash.data(),
# 2282 : 81669 : pindex->nHeight,
# 2283 : 81669 : block.vtx.size(),
# 2284 : 81669 : nInputs,
# 2285 : 81669 : nSigOpsCost,
# 2286 : 81669 : nTime5 - nTimeStart // in microseconds (µs)
# 2287 : 81669 : );
# 2288 : :
# 2289 : 81669 : return true;
# 2290 : 81669 : }
# 2291 : :
# 2292 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
# 2293 : 280323 : {
# 2294 : 280323 : AssertLockHeld(::cs_main);
# 2295 : 280323 : return this->GetCoinsCacheSizeState(
# 2296 : 280323 : m_coinstip_cache_size_bytes,
# 2297 [ + + ]: 280323 : m_mempool ? m_mempool->m_max_size_bytes : 0);
# 2298 : 280323 : }
# 2299 : :
# 2300 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
# 2301 : : size_t max_coins_cache_size_bytes,
# 2302 : : size_t max_mempool_size_bytes)
# 2303 : 280325 : {
# 2304 : 280325 : AssertLockHeld(::cs_main);
# 2305 [ + + ]: 280325 : const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
# 2306 : 280325 : int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
# 2307 : 280325 : int64_t nTotalSpace =
# 2308 : 280325 : max_coins_cache_size_bytes + std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
# 2309 : :
# 2310 : : //! No need to periodic flush if at least this much space still available.
# 2311 : 280325 : static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
# 2312 : 280325 : int64_t large_threshold =
# 2313 : 280325 : std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
# 2314 : :
# 2315 [ + + ]: 280325 : if (cacheSize > nTotalSpace) {
# 2316 : 1 : LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
# 2317 : 1 : return CoinsCacheSizeState::CRITICAL;
# 2318 [ - + ]: 280324 : } else if (cacheSize > large_threshold) {
# 2319 : 0 : return CoinsCacheSizeState::LARGE;
# 2320 : 0 : }
# 2321 : 280324 : return CoinsCacheSizeState::OK;
# 2322 : 280325 : }
# 2323 : :
# 2324 : : bool CChainState::FlushStateToDisk(
# 2325 : : BlockValidationState &state,
# 2326 : : FlushStateMode mode,
# 2327 : : int nManualPruneHeight)
# 2328 : 280323 : {
# 2329 : 280323 : LOCK(cs_main);
# 2330 : 280323 : assert(this->CanFlushToDisk());
# 2331 : 0 : static std::chrono::microseconds nLastWrite{0};
# 2332 : 280323 : static std::chrono::microseconds nLastFlush{0};
# 2333 : 280323 : std::set<int> setFilesToPrune;
# 2334 : 280323 : bool full_flush_completed = false;
# 2335 : :
# 2336 : 280323 : const size_t coins_count = CoinsTip().GetCacheSize();
# 2337 : 280323 : const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
# 2338 : :
# 2339 : 280323 : try {
# 2340 : 280323 : {
# 2341 : 280323 : bool fFlushForPrune = false;
# 2342 : 280323 : bool fDoFullFlush = false;
# 2343 : :
# 2344 : 280323 : CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
# 2345 : 280323 : LOCK(m_blockman.cs_LastBlockFile);
# 2346 [ + + ][ + + ]: 280323 : if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
# [ - + ][ + - ]
# 2347 : : // make sure we don't prune above any of the prune locks bestblocks
# 2348 : : // pruning is height-based
# 2349 : 4 : int last_prune{m_chain.Height()}; // last height we can prune
# 2350 : 4 : std::optional<std::string> limiting_lock; // prune lock that actually was the limiting factor, only used for logging
# 2351 : :
# 2352 [ - + ]: 4 : for (const auto& prune_lock : m_blockman.m_prune_locks) {
# 2353 [ # # ]: 0 : if (prune_lock.second.height_first == std::numeric_limits<int>::max()) continue;
# 2354 : : // Remove the buffer and one additional block here to get actual height that is outside of the buffer
# 2355 : 0 : const int lock_height{prune_lock.second.height_first - PRUNE_LOCK_BUFFER - 1};
# 2356 : 0 : last_prune = std::max(1, std::min(last_prune, lock_height));
# 2357 [ # # ]: 0 : if (last_prune == lock_height) {
# 2358 : 0 : limiting_lock = prune_lock.first;
# 2359 : 0 : }
# 2360 : 0 : }
# 2361 : :
# 2362 [ - + ]: 4 : if (limiting_lock) {
# 2363 [ # # ]: 0 : LogPrint(BCLog::PRUNE, "%s limited pruning to height %d\n", limiting_lock.value(), last_prune);
# 2364 : 0 : }
# 2365 : :
# 2366 [ - + ]: 4 : if (nManualPruneHeight > 0) {
# 2367 : 0 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
# 2368 : :
# 2369 : 0 : m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
# 2370 : 4 : } else {
# 2371 : 4 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
# 2372 : :
# 2373 : 4 : m_blockman.FindFilesToPrune(setFilesToPrune, m_params.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
# 2374 : 4 : m_blockman.m_check_for_pruning = false;
# 2375 : 4 : }
# 2376 [ - + ]: 4 : if (!setFilesToPrune.empty()) {
# 2377 : 0 : fFlushForPrune = true;
# 2378 [ # # ]: 0 : if (!m_blockman.m_have_pruned) {
# 2379 : 0 : m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
# 2380 : 0 : m_blockman.m_have_pruned = true;
# 2381 : 0 : }
# 2382 : 0 : }
# 2383 : 4 : }
# 2384 : 280323 : const auto nNow = GetTime<std::chrono::microseconds>();
# 2385 : : // Avoid writing/flushing immediately after startup.
# 2386 [ + + ]: 280323 : if (nLastWrite.count() == 0) {
# 2387 : 750 : nLastWrite = nNow;
# 2388 : 750 : }
# 2389 [ + + ]: 280323 : if (nLastFlush.count() == 0) {
# 2390 : 750 : nLastFlush = nNow;
# 2391 : 750 : }
# 2392 : : // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
# 2393 [ + + ][ - + ]: 280323 : bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
# 2394 : : // The cache is over the limit, we have to write now.
# 2395 [ + + ][ - + ]: 280323 : bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
# 2396 : : // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
# 2397 [ + + ][ + + ]: 280323 : bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
# 2398 : : // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
# 2399 [ + + ][ + + ]: 280323 : bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
# 2400 : : // Combine all conditions that result in a full cache flush.
# 2401 [ + + ][ - + ]: 280323 : fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
# [ - + ][ + + ]
# [ - + ]
# 2402 : : // Write blocks and block index to disk.
# 2403 [ + + ][ + + ]: 280323 : if (fDoFullFlush || fPeriodicWrite) {
# 2404 : : // Ensure we can write block index
# 2405 [ - + ]: 2084 : if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
# 2406 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2407 : 0 : }
# 2408 : 2084 : {
# 2409 : 2084 : LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
# 2410 : :
# 2411 : : // First make sure all block and undo data is flushed to disk.
# 2412 : 2084 : m_blockman.FlushBlockFile();
# 2413 : 2084 : }
# 2414 : :
# 2415 : : // Then update all block file information (which may refer to block and undo files).
# 2416 : 2084 : {
# 2417 : 2084 : LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
# 2418 : :
# 2419 [ - + ]: 2084 : if (!m_blockman.WriteBlockIndexDB()) {
# 2420 : 0 : return AbortNode(state, "Failed to write to block index database");
# 2421 : 0 : }
# 2422 : 2084 : }
# 2423 : : // Finally remove any pruned files
# 2424 [ - + ]: 2084 : if (fFlushForPrune) {
# 2425 : 0 : LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
# 2426 : :
# 2427 : 0 : UnlinkPrunedFiles(setFilesToPrune);
# 2428 : 0 : }
# 2429 : 2084 : nLastWrite = nNow;
# 2430 : 2084 : }
# 2431 : : // Flush best chain related state. This can only be done if the blocks / block index write was also done.
# 2432 [ + + ][ + + ]: 280323 : if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
# [ + - ]
# 2433 : 2074 : LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
# 2434 : 2074 : coins_count, coins_mem_usage / 1000), BCLog::BENCH);
# 2435 : :
# 2436 : : // Typical Coin structures on disk are around 48 bytes in size.
# 2437 : : // Pushing a new one to the database can cause it to be written
# 2438 : : // twice (once in the log, and once in the tables). This is already
# 2439 : : // an overestimation, as most will delete an existing entry or
# 2440 : : // overwrite one. Still, use a conservative safety factor of 2.
# 2441 [ - + ]: 2074 : if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
# 2442 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2443 : 0 : }
# 2444 : : // Flush the chainstate (which may refer to block index entries).
# 2445 [ - + ]: 2074 : if (!CoinsTip().Flush())
# 2446 : 0 : return AbortNode(state, "Failed to write to coin database");
# 2447 : 2074 : nLastFlush = nNow;
# 2448 : 2074 : full_flush_completed = true;
# 2449 : 2074 : TRACE5(utxocache, flush,
# 2450 : 2074 : (int64_t)(GetTimeMicros() - nNow.count()), // in microseconds (µs)
# 2451 : 2074 : (uint32_t)mode,
# 2452 : 2074 : (uint64_t)coins_count,
# 2453 : 2074 : (uint64_t)coins_mem_usage,
# 2454 : 2074 : (bool)fFlushForPrune);
# 2455 : 2074 : }
# 2456 : 280323 : }
# 2457 [ + + ]: 280323 : if (full_flush_completed) {
# 2458 : : // Update best block in wallet (so we can detect restored wallets).
# 2459 : 2074 : GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
# 2460 : 2074 : }
# 2461 : 280323 : } catch (const std::runtime_error& e) {
# 2462 : 0 : return AbortNode(state, std::string("System error while flushing: ") + e.what());
# 2463 : 0 : }
# 2464 : 280323 : return true;
# 2465 : 280323 : }
# 2466 : :
# 2467 : : void CChainState::ForceFlushStateToDisk()
# 2468 : 2032 : {
# 2469 : 2032 : BlockValidationState state;
# 2470 [ - + ]: 2032 : if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
# 2471 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2472 : 0 : }
# 2473 : 2032 : }
# 2474 : :
# 2475 : : void CChainState::PruneAndFlush()
# 2476 : 3 : {
# 2477 : 3 : BlockValidationState state;
# 2478 : 3 : m_blockman.m_check_for_pruning = true;
# 2479 [ - + ]: 3 : if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
# 2480 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2481 : 0 : }
# 2482 : 3 : }
# 2483 : :
# 2484 : : static void DoWarning(const bilingual_str& warning)
# 2485 : 4 : {
# 2486 : 4 : static bool fWarned = false;
# 2487 : 4 : SetMiscWarning(warning);
# 2488 [ + + ]: 4 : if (!fWarned) {
# 2489 : 2 : AlertNotify(warning.original);
# 2490 : 2 : fWarned = true;
# 2491 : 2 : }
# 2492 : 4 : }
# 2493 : :
# 2494 : : /** Private helper function that concatenates warning messages. */
# 2495 : : static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
# 2496 : 144 : {
# 2497 [ - + ]: 144 : if (!res.empty()) res += Untranslated(", ");
# 2498 : 144 : res += warn;
# 2499 : 144 : }
# 2500 : :
# 2501 : : static void UpdateTipLog(
# 2502 : : const CCoinsViewCache& coins_tip,
# 2503 : : const CBlockIndex* tip,
# 2504 : : const CChainParams& params,
# 2505 : : const std::string& func_name,
# 2506 : : const std::string& prefix,
# 2507 : : const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 2508 : 91526 : {
# 2509 : :
# 2510 : 91526 : AssertLockHeld(::cs_main);
# 2511 [ + + ]: 91526 : LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
# 2512 : 91526 : prefix, func_name,
# 2513 : 91526 : tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
# 2514 : 91526 : log(tip->nChainWork.getdouble()) / log(2.0), (unsigned long)tip->nChainTx,
# 2515 : 91526 : FormatISO8601DateTime(tip->GetBlockTime()),
# 2516 : 91526 : GuessVerificationProgress(params.TxData(), tip),
# 2517 : 91526 : coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
# 2518 : 91526 : coins_tip.GetCacheSize(),
# 2519 : 91526 : !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
# 2520 : 91526 : }
# 2521 : :
# 2522 : : void CChainState::UpdateTip(const CBlockIndex* pindexNew)
# 2523 : 91527 : {
# 2524 : 91527 : AssertLockHeld(::cs_main);
# 2525 : 91527 : const auto& coins_tip = this->CoinsTip();
# 2526 : :
# 2527 : : // The remainder of the function isn't relevant if we are not acting on
# 2528 : : // the active chainstate, so return if need be.
# 2529 [ + + ]: 91527 : if (this != &m_chainman.ActiveChainstate()) {
# 2530 : : // Only log every so often so that we don't bury log messages at the tip.
# 2531 : 1 : constexpr int BACKGROUND_LOG_INTERVAL = 2000;
# 2532 [ - + ]: 1 : if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
# 2533 : 0 : UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "[background validation] ", "");
# 2534 : 0 : }
# 2535 : 1 : return;
# 2536 : 1 : }
# 2537 : :
# 2538 : : // New best block
# 2539 [ + + ]: 91526 : if (m_mempool) {
# 2540 : 91425 : m_mempool->AddTransactionsUpdated(1);
# 2541 : 91425 : }
# 2542 : :
# 2543 : 91526 : {
# 2544 : 91526 : LOCK(g_best_block_mutex);
# 2545 : 91526 : g_best_block = pindexNew->GetBlockHash();
# 2546 : 91526 : g_best_block_cv.notify_all();
# 2547 : 91526 : }
# 2548 : :
# 2549 : 91526 : bilingual_str warning_messages;
# 2550 [ + + ]: 91526 : if (!this->IsInitialBlockDownload()) {
# 2551 : 82864 : const CBlockIndex* pindex = pindexNew;
# 2552 [ + + ]: 2485920 : for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
# 2553 : 2403056 : WarningBitsConditionChecker checker(m_chainman, bit);
# 2554 : 2403056 : ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache.at(bit));
# 2555 [ + + ][ + + ]: 2403056 : if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
# 2556 : 148 : const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
# 2557 [ + + ]: 148 : if (state == ThresholdState::ACTIVE) {
# 2558 : 4 : DoWarning(warning);
# 2559 : 144 : } else {
# 2560 : 144 : AppendWarning(warning_messages, warning);
# 2561 : 144 : }
# 2562 : 148 : }
# 2563 : 2403056 : }
# 2564 : 82864 : }
# 2565 : 91526 : UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "", warning_messages.original);
# 2566 : 91526 : }
# 2567 : :
# 2568 : : /** Disconnect m_chain's tip.
# 2569 : : * After calling, the mempool will be in an inconsistent state, with
# 2570 : : * transactions from disconnected blocks being added to disconnectpool. You
# 2571 : : * should make the mempool consistent again by calling MaybeUpdateMempoolForReorg.
# 2572 : : * with cs_main held.
# 2573 : : *
# 2574 : : * If disconnectpool is nullptr, then no disconnected transactions are added to
# 2575 : : * disconnectpool (note that the caller is responsible for mempool consistency
# 2576 : : * in any case).
# 2577 : : */
# 2578 : : bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool)
# 2579 : 9662 : {
# 2580 : 9662 : AssertLockHeld(cs_main);
# 2581 [ + - ]: 9662 : if (m_mempool) AssertLockHeld(m_mempool->cs);
# 2582 : :
# 2583 : 9662 : CBlockIndex *pindexDelete = m_chain.Tip();
# 2584 : 9662 : assert(pindexDelete);
# 2585 : 0 : assert(pindexDelete->pprev);
# 2586 : : // Read block from disk.
# 2587 : 0 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
# 2588 : 9662 : CBlock& block = *pblock;
# 2589 [ - + ]: 9662 : if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) {
# 2590 : 0 : return error("DisconnectTip(): Failed to read block");
# 2591 : 0 : }
# 2592 : : // Apply the block atomically to the chain state.
# 2593 : 9662 : int64_t nStart = GetTimeMicros();
# 2594 : 9662 : {
# 2595 : 9662 : CCoinsViewCache view(&CoinsTip());
# 2596 : 9662 : assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
# 2597 [ + + ]: 9662 : if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
# 2598 : 1 : return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
# 2599 : 9661 : bool flushed = view.Flush();
# 2600 : 9661 : assert(flushed);
# 2601 : 9661 : }
# 2602 [ + - ]: 9661 : LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
# 2603 : :
# 2604 : 9661 : {
# 2605 : : // Prune locks that began at or after the tip should be moved backward so they get a chance to reorg
# 2606 : 9661 : const int max_height_first{pindexDelete->nHeight - 1};
# 2607 [ + + ]: 9661 : for (auto& prune_lock : m_blockman.m_prune_locks) {
# 2608 [ - + ]: 32 : if (prune_lock.second.height_first <= max_height_first) continue;
# 2609 : :
# 2610 : 32 : prune_lock.second.height_first = max_height_first;
# 2611 [ + - ]: 32 : LogPrint(BCLog::PRUNE, "%s prune lock moved back to %d\n", prune_lock.first, max_height_first);
# 2612 : 32 : }
# 2613 : 9661 : }
# 2614 : :
# 2615 : : // Write the chain state to disk, if necessary.
# 2616 [ - + ]: 9661 : if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
# 2617 : 0 : return false;
# 2618 : 0 : }
# 2619 : :
# 2620 [ + - ][ + - ]: 9661 : if (disconnectpool && m_mempool) {
# 2621 : : // Save transactions to re-add to mempool at end of reorg
# 2622 [ + + ]: 25511 : for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
# 2623 : 15850 : disconnectpool->addTransaction(*it);
# 2624 : 15850 : }
# 2625 [ + + ]: 12866 : while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
# 2626 : : // Drop the earliest entry, and remove its children from the mempool.
# 2627 : 3205 : auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
# 2628 : 3205 : m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
# 2629 : 3205 : disconnectpool->removeEntry(it);
# 2630 : 3205 : }
# 2631 : 9661 : }
# 2632 : :
# 2633 : 9661 : m_chain.SetTip(*pindexDelete->pprev);
# 2634 : :
# 2635 : 9661 : UpdateTip(pindexDelete->pprev);
# 2636 : : // Let wallets know transactions went from 1-confirmed to
# 2637 : : // 0-confirmed or conflicted:
# 2638 : 9661 : GetMainSignals().BlockDisconnected(pblock, pindexDelete);
# 2639 : 9661 : return true;
# 2640 : 9661 : }
# 2641 : :
# 2642 : : static int64_t nTimeReadFromDiskTotal = 0;
# 2643 : : static int64_t nTimeConnectTotal = 0;
# 2644 : : static int64_t nTimeFlush = 0;
# 2645 : : static int64_t nTimeChainState = 0;
# 2646 : : static int64_t nTimePostConnect = 0;
# 2647 : :
# 2648 : : struct PerBlockConnectTrace {
# 2649 : : CBlockIndex* pindex = nullptr;
# 2650 : : std::shared_ptr<const CBlock> pblock;
# 2651 : 179681 : PerBlockConnectTrace() = default;
# 2652 : : };
# 2653 : : /**
# 2654 : : * Used to track blocks whose transactions were applied to the UTXO state as a
# 2655 : : * part of a single ActivateBestChainStep call.
# 2656 : : *
# 2657 : : * This class is single-use, once you call GetBlocksConnected() you have to throw
# 2658 : : * it away and make a new one.
# 2659 : : */
# 2660 : : class ConnectTrace {
# 2661 : : private:
# 2662 : : std::vector<PerBlockConnectTrace> blocksConnected;
# 2663 : :
# 2664 : : public:
# 2665 : 97815 : explicit ConnectTrace() : blocksConnected(1) {}
# 2666 : :
# 2667 : 81866 : void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
# 2668 : 81866 : assert(!blocksConnected.back().pindex);
# 2669 : 0 : assert(pindex);
# 2670 : 0 : assert(pblock);
# 2671 : 0 : blocksConnected.back().pindex = pindex;
# 2672 : 81866 : blocksConnected.back().pblock = std::move(pblock);
# 2673 : 81866 : blocksConnected.emplace_back();
# 2674 : 81866 : }
# 2675 : :
# 2676 : 75832 : std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
# 2677 : : // We always keep one extra block at the end of our list because
# 2678 : : // blocks are added after all the conflicted transactions have
# 2679 : : // been filled in. Thus, the last entry should always be an empty
# 2680 : : // one waiting for the transactions from the next block. We pop
# 2681 : : // the last entry here to make sure the list we return is sane.
# 2682 : 75832 : assert(!blocksConnected.back().pindex);
# 2683 : 0 : blocksConnected.pop_back();
# 2684 : 75832 : return blocksConnected;
# 2685 : 75832 : }
# 2686 : : };
# 2687 : :
# 2688 : : /**
# 2689 : : * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
# 2690 : : * corresponding to pindexNew, to bypass loading it again from disk.
# 2691 : : *
# 2692 : : * The block is added to connectTrace if connection succeeds.
# 2693 : : */
# 2694 : : bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
# 2695 : 84530 : {
# 2696 : 84530 : AssertLockHeld(cs_main);
# 2697 [ + + ]: 84530 : if (m_mempool) AssertLockHeld(m_mempool->cs);
# 2698 : :
# 2699 : 84530 : assert(pindexNew->pprev == m_chain.Tip());
# 2700 : : // Read block from disk.
# 2701 : 0 : int64_t nTime1 = GetTimeMicros();
# 2702 : 84530 : std::shared_ptr<const CBlock> pthisBlock;
# 2703 [ + + ]: 84530 : if (!pblock) {
# 2704 : 12004 : std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
# 2705 [ - + ]: 12004 : if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_params.GetConsensus())) {
# 2706 : 0 : return AbortNode(state, "Failed to read block");
# 2707 : 0 : }
# 2708 : 12004 : pthisBlock = pblockNew;
# 2709 : 72526 : } else {
# 2710 [ + - ]: 72526 : LogPrint(BCLog::BENCH, " - Using cached block\n");
# 2711 : 72526 : pthisBlock = pblock;
# 2712 : 72526 : }
# 2713 : 84530 : const CBlock& blockConnecting = *pthisBlock;
# 2714 : : // Apply the block atomically to the chain state.
# 2715 : 84530 : int64_t nTime2 = GetTimeMicros(); nTimeReadFromDiskTotal += nTime2 - nTime1;
# 2716 : 84530 : int64_t nTime3;
# 2717 [ + - ]: 84530 : LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDiskTotal * MICRO, nTimeReadFromDiskTotal * MILLI / nBlocksTotal);
# 2718 : 84530 : {
# 2719 : 84530 : CCoinsViewCache view(&CoinsTip());
# 2720 : 84530 : bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
# 2721 : 84530 : GetMainSignals().BlockChecked(blockConnecting, state);
# 2722 [ + + ]: 84530 : if (!rv) {
# 2723 [ + - ]: 2664 : if (state.IsInvalid())
# 2724 : 2664 : InvalidBlockFound(pindexNew, state);
# 2725 : 2664 : return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
# 2726 : 2664 : }
# 2727 : 81866 : nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
# 2728 : 81866 : assert(nBlocksTotal > 0);
# 2729 [ + - ]: 81866 : LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
# 2730 : 81866 : bool flushed = view.Flush();
# 2731 : 81866 : assert(flushed);
# 2732 : 81866 : }
# 2733 : 0 : int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
# 2734 [ + - ]: 81866 : LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
# 2735 : : // Write the chain state to disk, if necessary.
# 2736 [ - + ]: 81866 : if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
# 2737 : 0 : return false;
# 2738 : 0 : }
# 2739 : 81866 : int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
# 2740 [ + - ]: 81866 : LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
# 2741 : : // Remove conflicting transactions from the mempool.;
# 2742 [ + + ]: 81866 : if (m_mempool) {
# 2743 : 81765 : m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
# 2744 : 81765 : disconnectpool.removeForBlock(blockConnecting.vtx);
# 2745 : 81765 : }
# 2746 : : // Update m_chain & related variables.
# 2747 : 81866 : m_chain.SetTip(*pindexNew);
# 2748 : 81866 : UpdateTip(pindexNew);
# 2749 : :
# 2750 : 81866 : int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
# 2751 [ + - ]: 81866 : LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
# 2752 [ + - ]: 81866 : LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
# 2753 : :
# 2754 : 81866 : connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
# 2755 : 81866 : return true;
# 2756 : 81866 : }
# 2757 : :
# 2758 : : /**
# 2759 : : * Return the tip of the chain with the most work in it, that isn't
# 2760 : : * known to be invalid (it's however far from certain to be valid).
# 2761 : : */
# 2762 : : CBlockIndex* CChainState::FindMostWorkChain()
# 2763 : 95324 : {
# 2764 : 95324 : AssertLockHeld(::cs_main);
# 2765 : 95328 : do {
# 2766 : 95328 : CBlockIndex *pindexNew = nullptr;
# 2767 : :
# 2768 : : // Find the best candidate header.
# 2769 : 95328 : {
# 2770 : 95328 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
# 2771 [ - + ]: 95328 : if (it == setBlockIndexCandidates.rend())
# 2772 : 0 : return nullptr;
# 2773 : 95328 : pindexNew = *it;
# 2774 : 95328 : }
# 2775 : :
# 2776 : : // Check whether all blocks on the path between the currently active chain and the candidate are valid.
# 2777 : : // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
# 2778 : 0 : CBlockIndex *pindexTest = pindexNew;
# 2779 : 95328 : bool fInvalidAncestor = false;
# 2780 [ + + ][ + + ]: 179869 : while (pindexTest && !m_chain.Contains(pindexTest)) {
# 2781 : 84545 : assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
# 2782 : :
# 2783 : : // Pruned nodes may have entries in setBlockIndexCandidates for
# 2784 : : // which block files have been deleted. Remove those as candidates
# 2785 : : // for the most work chain if we come across them; we can't switch
# 2786 : : // to a chain unless we have all the non-active-chain parent blocks.
# 2787 : 0 : bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
# 2788 : 84545 : bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
# 2789 [ + + ][ - + ]: 84545 : if (fFailedChain || fMissingData) {
# 2790 : : // Candidate chain is not usable (either invalid or missing data)
# 2791 [ + - ][ - + ]: 4 : if (fFailedChain && (m_chainman.m_best_invalid == nullptr || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork)) {
# [ - + ]
# 2792 : 0 : m_chainman.m_best_invalid = pindexNew;
# 2793 : 0 : }
# 2794 : 4 : CBlockIndex *pindexFailed = pindexNew;
# 2795 : : // Remove the entire chain from the set.
# 2796 [ + + ]: 8 : while (pindexTest != pindexFailed) {
# 2797 [ + - ]: 4 : if (fFailedChain) {
# 2798 : 4 : pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
# 2799 [ # # ]: 4 : } else if (fMissingData) {
# 2800 : : // If we're missing data, then add back to m_blocks_unlinked,
# 2801 : : // so that if the block arrives in the future we can try adding
# 2802 : : // to setBlockIndexCandidates again.
# 2803 : 0 : m_blockman.m_blocks_unlinked.insert(
# 2804 : 0 : std::make_pair(pindexFailed->pprev, pindexFailed));
# 2805 : 0 : }
# 2806 : 4 : setBlockIndexCandidates.erase(pindexFailed);
# 2807 : 4 : pindexFailed = pindexFailed->pprev;
# 2808 : 4 : }
# 2809 : 4 : setBlockIndexCandidates.erase(pindexTest);
# 2810 : 4 : fInvalidAncestor = true;
# 2811 : 4 : break;
# 2812 : 4 : }
# 2813 : 84541 : pindexTest = pindexTest->pprev;
# 2814 : 84541 : }
# 2815 [ + + ]: 95328 : if (!fInvalidAncestor)
# 2816 : 95324 : return pindexNew;
# 2817 : 95328 : } while(true);
# 2818 : 95324 : }
# 2819 : :
# 2820 : : /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
# 2821 : 82346 : void CChainState::PruneBlockIndexCandidates() {
# 2822 : : // Note that we can't delete the current block itself, as we may need to return to it later in case a
# 2823 : : // reorganization to a better block fails.
# 2824 : 82346 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
# 2825 [ + - ][ + + ]: 227685 : while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
# [ + + ]
# 2826 : 145339 : setBlockIndexCandidates.erase(it++);
# 2827 : 145339 : }
# 2828 : : // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
# 2829 : 82346 : assert(!setBlockIndexCandidates.empty());
# 2830 : 82346 : }
# 2831 : :
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
 *
 * Caller must hold cs_main (and the mempool lock, if a mempool is attached).
 * On return, fInvalidFound tells the caller whether an invalid block was
 * encountered (in which case its cached most-work chain must be recomputed).
 *
 * @returns true unless a system error occurred
 */
bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);
    if (m_mempool) AssertLockHeld(m_mempool->cs);

    const CBlockIndex* pindexOldTip = m_chain.Tip();
    const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
        if (!DisconnectTip(state, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            MaybeUpdateMempoolForReorg(disconnectpool, false);

            // If we're unable to disconnect a block during normal operation,
            // then that is a failure of our local system -- we should abort
            // rather than stay on a less work chain.
            AbortNode(state, "Failed to disconnect block; see debug.log for details");
            return false;
        }
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect (in descending height order).
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way. Batches of at most 32 blocks are walked
        // backwards from the target via pprev, then connected in forward order.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
            // pblock is only forwarded for the final (most-work) block; for
            // intermediate blocks ConnectTip reads the data from disk.
            if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    // BLOCK_MUTATED means the received block data didn't match
                    // its header, so the header chain itself is not proven bad
                    // and is not marked invalid in that case.
                    if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
                        InvalidChainFound(vpindexToConnect.front());
                    }
                    state = BlockValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    MaybeUpdateMempoolForReorg(disconnectpool, false);
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        MaybeUpdateMempoolForReorg(disconnectpool, true);
    }
    if (m_mempool) m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);

    CheckForkWarningConditions();

    return true;
}
# 2922 : :
# 2923 : : static SynchronizationState GetSynchronizationState(bool init)
# 2924 : 132630 : {
# 2925 [ + + ]: 132630 : if (!init) return SynchronizationState::POST_INIT;
# 2926 [ + + ]: 10607 : if (::fReindex) return SynchronizationState::INIT_REINDEX;
# 2927 : 9146 : return SynchronizationState::INIT_DOWNLOAD;
# 2928 : 10607 : }
# 2929 : :
# 2930 : 119141 : static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) {
# 2931 : 119141 : bool fNotify = false;
# 2932 : 119141 : bool fInitialBlockDownload = false;
# 2933 : 119141 : static CBlockIndex* pindexHeaderOld = nullptr;
# 2934 : 119141 : CBlockIndex* pindexHeader = nullptr;
# 2935 : 119141 : {
# 2936 : 119141 : LOCK(cs_main);
# 2937 : 119141 : pindexHeader = chainstate.m_chainman.m_best_header;
# 2938 : :
# 2939 [ + + ]: 119141 : if (pindexHeader != pindexHeaderOld) {
# 2940 : 59387 : fNotify = true;
# 2941 : 59387 : fInitialBlockDownload = chainstate.IsInitialBlockDownload();
# 2942 : 59387 : pindexHeaderOld = pindexHeader;
# 2943 : 59387 : }
# 2944 : 119141 : }
# 2945 : : // Send block tip changed notifications without cs_main
# 2946 [ + + ]: 119141 : if (fNotify) {
# 2947 : 59387 : uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader->nHeight, pindexHeader->nTime, false);
# 2948 : 59387 : }
# 2949 : 119141 : return fNotify;
# 2950 : 119141 : }
# 2951 : :
# 2952 : 98839 : static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
# 2953 : 98839 : AssertLockNotHeld(cs_main);
# 2954 : :
# 2955 [ + + ]: 98839 : if (GetMainSignals().CallbacksPending() > 10) {
# 2956 : 268 : SyncWithValidationInterfaceQueue();
# 2957 : 268 : }
# 2958 : 98839 : }
# 2959 : :
// Make the most-work fully-validated chain the active chain, connecting (and
// disconnecting) blocks as needed. Periodically releases cs_main between
// batches so other threads and the callback queue can make progress.
// Returns true unless a system error occurred.
bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
{
    AssertLockHeld(m_chainstate_mutex) /* NOTE(review): original asserts NOT-held below */;
    AssertLockNotHeld(m_chainstate_mutex);

    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(::cs_main);

    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not lock up other threads for too long
    // during large connects - and to allow for e.g. the callback queue to drain
    // we use m_chainstate_mutex to enforce mutual exclusion so that only one caller may execute this function at a time
    LOCK(m_chainstate_mutex);

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.
        LimitValidationInterfaceQueue();

        {
            LOCK(cs_main);
            // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
            LOCK(MempoolMutex());
            CBlockIndex* starting_tip = m_chain.Tip();
            bool blocks_connected = false;
            do {
                // We absolutely may not unlock cs_main until we've made forward progress
                // (with the exception of shutdown due to hardware issues, low disk space, etc).
                ConnectTrace connectTrace; // Destructed before cs_main is unlocked

                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain();
                }

                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
                    break;
                }

                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only forward pblock when it actually corresponds to the
                // most-work tip; otherwise pass an empty pointer.
                if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
                    // A system error occurred
                    return false;
                }
                blocks_connected = true;

                if (fInvalidFound) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }
                pindexNewTip = m_chain.Tip();

                for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
                }
            } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
            if (!blocks_connected) return true;

            const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
            bool fInitialDownload = IsInitialBlockDownload();

            // Notify external listeners about the new tip.
            // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
            if (pindexFork != pindexNewTip) {
                // Notify ValidationInterface subscribers
                GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

                // Always notify the UI if a new block tip was connected
                uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();

        // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
        // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
        // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested()) break;
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex();

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FlushStateMode::PERIODIC)) {
        return false;
    }

    return true;
}
# 3061 : :
// Treat a block as if it were received earlier than competing same-work tips,
// by giving it a negative, decreasing nSequenceId and re-inserting it into
// setBlockIndexCandidates, then re-running best-chain selection.
// (Backs the `preciousblock` RPC — presumably; confirm against the caller.)
bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
{
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);
    {
        LOCK(cs_main);
        if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = m_chain.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }
    // cs_main must be released before ActivateBestChain re-acquires it.
    return ActivateBestChain(state, std::shared_ptr<const CBlock>());
}
# 3092 : :
// Mark pindex as invalid and, if it is part of the active chain, disconnect
// the tip repeatedly until pindex is no longer active. Disconnected blocks are
// flagged BLOCK_FAILED_VALID/BLOCK_FAILED_CHILD and candidate tips are kept
// consistent throughout. Returns false on a system error or outside
// interference; true otherwise (including when shutdown interrupts the walk).
bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
{
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);

    // Genesis block can't be invalidated
    assert(pindex);
    if (pindex->nHeight == 0) return false;

    CBlockIndex* to_mark_failed = pindex;
    bool pindex_was_in_chain = false;
    int disconnected = 0;

    // We do not allow ActivateBestChain() to run while InvalidateBlock() is
    // running, as that could cause the tip to change while we disconnect
    // blocks.
    LOCK(m_chainstate_mutex);

    // We'll be acquiring and releasing cs_main below, to allow the validation
    // callbacks to run. However, we should keep the block index in a
    // consistent state as we disconnect blocks -- in particular we need to
    // add equal-work blocks to setBlockIndexCandidates as we disconnect.
    // To avoid walking the block index repeatedly in search of candidates,
    // build a map once so that we can look up candidate blocks by chain
    // work as we go.
    std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;

    {
        LOCK(cs_main);
        for (auto& entry : m_blockman.m_block_index) {
            CBlockIndex* candidate = &entry.second;
            // We don't need to put anything in our active chain into the
            // multimap, because those candidates will be found and considered
            // as we disconnect.
            // Instead, consider only non-active-chain blocks that have at
            // least as much work as where we expect the new tip to end up.
            if (!m_chain.Contains(candidate) &&
                    !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                    candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                    candidate->HaveTxsDownloaded()) {
                candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
            }
        }
    }

    // Disconnect (descendants of) pindex, and mark them invalid.
    while (true) {
        if (ShutdownRequested()) break;

        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();

        // cs_main is re-acquired on each iteration so callbacks can run in
        // between tip disconnections.
        LOCK(cs_main);
        // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
        // called after DisconnectTip without unlocking in between
        LOCK(MempoolMutex());
        if (!m_chain.Contains(pindex)) break;
        pindex_was_in_chain = true;
        CBlockIndex *invalid_walk_tip = m_chain.Tip();

        // ActivateBestChain considers blocks already in m_chain
        // unconditionally valid already, so force disconnect away from it.
        DisconnectedBlockTransactions disconnectpool;
        bool ret = DisconnectTip(state, &disconnectpool);
        // DisconnectTip will add transactions to disconnectpool.
        // Adjust the mempool to be consistent with the new tip, adding
        // transactions back to the mempool if disconnecting was successful,
        // and we're not doing a very deep invalidation (in which case
        // keeping the mempool up to date is probably futile anyway).
        MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
        if (!ret) return false;
        assert(invalid_walk_tip->pprev == m_chain.Tip());

        // We immediately mark the disconnected blocks as invalid.
        // This prevents a case where pruned nodes may fail to invalidateblock
        // and be left unable to start as they have no tip candidates (as there
        // are no blocks that meet the "have data and are not invalid per
        // nStatus" criteria for inclusion in setBlockIndexCandidates).
        invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
        m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
        setBlockIndexCandidates.erase(invalid_walk_tip);
        setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
        if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
            // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
            // need to be BLOCK_FAILED_CHILD instead.
            to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
            m_blockman.m_dirty_blockindex.insert(to_mark_failed);
        }

        // Add any equal or more work headers to setBlockIndexCandidates
        auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
        while (candidate_it != candidate_blocks_by_work.end()) {
            if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
                setBlockIndexCandidates.insert(candidate_it->second);
                candidate_it = candidate_blocks_by_work.erase(candidate_it);
            } else {
                ++candidate_it;
            }
        }

        // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
        // iterations, or, if it's the last one, call InvalidChainFound on it.
        to_mark_failed = invalid_walk_tip;
    }

    CheckBlockIndex();

    {
        LOCK(cs_main);
        if (m_chain.Contains(to_mark_failed)) {
            // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
            return false;
        }

        // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
        to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
        m_blockman.m_dirty_blockindex.insert(to_mark_failed);
        setBlockIndexCandidates.erase(to_mark_failed);
        m_chainman.m_failed_blocks.insert(to_mark_failed);

        // If any new blocks somehow arrived while we were disconnecting
        // (above), then the pre-calculation of what should go into
        // setBlockIndexCandidates may have missed entries. This would
        // technically be an inconsistency in the block index, but if we clean
        // it up here, this should be an essentially unobservable error.
        // Loop back over all block index entries and add any missing entries
        // to setBlockIndexCandidates.
        for (auto& [_, block_index] : m_blockman.m_block_index) {
            if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) {
                setBlockIndexCandidates.insert(&block_index);
            }
        }

        InvalidChainFound(to_mark_failed);
    }

    // Only notify about a new block tip if the active chain was modified.
    if (pindex_was_in_chain) {
        uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
    }
    return true;
}
# 3235 : :
// Clear BLOCK_FAILED_* flags from pindex, all its descendants, and all its
// ancestors, re-adding newly eligible blocks to setBlockIndexCandidates and
// resetting the best-invalid marker when it pointed at a revived block.
// Caller must hold cs_main.
void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    // (A block descends from pindex iff its ancestor at nHeight is pindex.)
    for (auto& [_, block_index] : m_blockman.m_block_index) {
        if (!block_index.IsValid() && block_index.GetAncestor(nHeight) == pindex) {
            block_index.nStatus &= ~BLOCK_FAILED_MASK;
            m_blockman.m_dirty_blockindex.insert(&block_index);
            if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) {
                setBlockIndexCandidates.insert(&block_index);
            }
            if (&block_index == m_chainman.m_best_invalid) {
                // Reset invalid block marker if it was pointing to one of those.
                m_chainman.m_best_invalid = nullptr;
            }
            m_chainman.m_failed_blocks.erase(&block_index);
        }
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != nullptr) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            m_blockman.m_dirty_blockindex.insert(pindex);
            m_chainman.m_failed_blocks.erase(pindex);
        }
        pindex = pindex->pprev;
    }
}
# 3267 : :
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS).
 *
 *  Records the on-disk position of the block, raises its validity level, and —
 *  when all ancestors already have their transactions — propagates nChainTx
 *  through this block and any previously "unlinked" descendants, adding
 *  eligible blocks to setBlockIndexCandidates. Otherwise the block is parked
 *  in m_blocks_unlinked until its parent's transactions arrive.
 *  Caller must hold cs_main.
 */
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
{
    AssertLockHeld(cs_main);
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (DeploymentActiveAt(*pindexNew, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    m_blockman.m_dirty_blockindex.insert(pindexNew);

    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            // nChainTx = cumulative transaction count up to and including this block.
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            pindex->nSequenceId = nBlockSequenceId++;
            if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            // Any children waiting on this block's transactions can now be processed.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                m_blockman.m_blocks_unlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }
}
# 3312 : :
# 3313 : : static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
# 3314 : 262747 : {
# 3315 : : // Check proof of work matches claimed amount
# 3316 [ + + ][ + + ]: 262747 : if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
# 3317 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
# 3318 : :
# 3319 : 262746 : return true;
# 3320 : 262747 : }
# 3321 : :
// Context-free block validity checks (no UTXO set, no previous headers):
// header PoW, optional signet solution, merkle root and malleability,
// size limits, coinbase placement, per-transaction checks and legacy sigop
// count. Results are cached via block.fChecked when both PoW and merkle
// checks ran, so repeat calls are cheap.
bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Signet only: check block solution
    if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
    }

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits (witness-stripped serialization, scaled by WITNESS_SCALE_FACTOR)
    if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");

    // First transaction must be coinbase, the rest must not be
    // (the empty() re-check is redundant after the size check above, but harmless)
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");

    // Check transactions
    // Must check for duplicate inputs (see CVE-2018-17144)
    for (const auto& tx : block.vtx) {
        TxValidationState tx_state;
        if (!CheckTransaction(*tx, tx_state)) {
            // CheckBlock() does context-free validation checks. The only
            // possible failures are consensus failures.
            assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
        }
    }
    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");

    // Only cache the result when every check above actually ran.
    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
# 3395 : :
# 3396 : : void ChainstateManager::UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev) const
# 3397 : 47018 : {
# 3398 : 47018 : int commitpos = GetWitnessCommitmentIndex(block);
# 3399 : 47018 : static const std::vector<unsigned char> nonce(32, 0x00);
# 3400 [ + + ][ + + ]: 47018 : if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, *this, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) {
# [ + + ]
# 3401 : 36253 : CMutableTransaction tx(*block.vtx[0]);
# 3402 : 36253 : tx.vin[0].scriptWitness.stack.resize(1);
# 3403 : 36253 : tx.vin[0].scriptWitness.stack[0] = nonce;
# 3404 : 36253 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3405 : 36253 : }
# 3406 : 47018 : }
# 3407 : :
# 3408 : : std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev) const
# 3409 : 42895 : {
# 3410 : 42895 : std::vector<unsigned char> commitment;
# 3411 : 42895 : int commitpos = GetWitnessCommitmentIndex(block);
# 3412 : 42895 : std::vector<unsigned char> ret(32, 0x00);
# 3413 [ + - ]: 42895 : if (commitpos == NO_WITNESS_COMMITMENT) {
# 3414 : 42895 : uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
# 3415 : 42895 : CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
# 3416 : 42895 : CTxOut out;
# 3417 : 42895 : out.nValue = 0;
# 3418 : 42895 : out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
# 3419 : 42895 : out.scriptPubKey[0] = OP_RETURN;
# 3420 : 42895 : out.scriptPubKey[1] = 0x24;
# 3421 : 42895 : out.scriptPubKey[2] = 0xaa;
# 3422 : 42895 : out.scriptPubKey[3] = 0x21;
# 3423 : 42895 : out.scriptPubKey[4] = 0xa9;
# 3424 : 42895 : out.scriptPubKey[5] = 0xed;
# 3425 : 42895 : memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
# 3426 : 42895 : commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
# 3427 : 42895 : CMutableTransaction tx(*block.vtx[0]);
# 3428 : 42895 : tx.vout.push_back(out);
# 3429 : 42895 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3430 : 42895 : }
# 3431 : 42895 : UpdateUncommittedBlockStructures(block, pindexPrev);
# 3432 : 42895 : return commitment;
# 3433 : 42895 : }
# 3434 : :
# 3435 : : bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams)
# 3436 : 10622 : {
# 3437 : 10622 : return std::all_of(headers.cbegin(), headers.cend(),
# 3438 : 527376 : [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams);});
# 3439 : 10622 : }
# 3440 : :
# 3441 : : arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader>& headers)
# 3442 : 49515 : {
# 3443 : 49515 : arith_uint256 total_work{0};
# 3444 [ + + ]: 542821 : for (const CBlockHeader& header : headers) {
# 3445 : 542821 : CBlockIndex dummy(header);
# 3446 : 542821 : total_work += GetBlockProof(dummy);
# 3447 : 542821 : }
# 3448 : 49515 : return total_work;
# 3449 : 49515 : }
# 3450 : :
# 3451 : : /** Context-dependent validity checks.
# 3452 : : * By "context", we mean only the previous block headers, but not the UTXO
# 3453 : : * set; UTXO-related validity checks are done in ConnectBlock().
# 3454 : : * NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3455 : : * should consider upgrade issues if we change which consensus rules are
# 3456 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3457 : : * in ConnectBlock().
# 3458 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3459 : : */
# 3460 : : static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev, NodeClock::time_point now) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 3461 : 128194 : {
# 3462 : 128194 : AssertLockHeld(::cs_main);
# 3463 : 128194 : assert(pindexPrev != nullptr);
# 3464 : 0 : const int nHeight = pindexPrev->nHeight + 1;
# 3465 : :
# 3466 : : // Check proof of work
# 3467 : 128194 : const Consensus::Params& consensusParams = chainman.GetConsensus();
# 3468 [ + + ]: 128194 : if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
# 3469 : 2 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
# 3470 : :
# 3471 : : // Check against checkpoints
# 3472 [ + + ]: 128192 : if (fCheckpointsEnabled) {
# 3473 : : // Don't accept any forks from the main chain prior to last checkpoint.
# 3474 : : // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
# 3475 : : // BlockIndex().
# 3476 : 127930 : const CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(chainman.GetParams().Checkpoints());
# 3477 [ + + ][ + + ]: 127930 : if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
# 3478 : 1 : LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
# 3479 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
# 3480 : 1 : }
# 3481 : 127930 : }
# 3482 : :
# 3483 : : // Check timestamp against prev
# 3484 [ + + ]: 128191 : if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
# 3485 : 6 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
# 3486 : :
# 3487 : : // Check timestamp
# 3488 [ + + ]: 128185 : if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
# 3489 : 5 : return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
# 3490 : 5 : }
# 3491 : :
# 3492 : : // Reject blocks with outdated version
# 3493 [ + + ][ + + ]: 128180 : if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
# 3494 [ + + ][ + + ]: 128180 : (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_DERSIG)) ||
# 3495 [ + + ][ + + ]: 128180 : (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CLTV))) {
# 3496 : 3 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
# 3497 : 3 : strprintf("rejected nVersion=0x%08x block", block.nVersion));
# 3498 : 3 : }
# 3499 : :
# 3500 : 128177 : return true;
# 3501 : 128180 : }
# 3502 : :
# 3503 : : /** NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3504 : : * should consider upgrade issues if we change which consensus rules are
# 3505 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3506 : : * in ConnectBlock().
# 3507 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3508 : : */
# 3509 : : static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const ChainstateManager& chainman, const CBlockIndex* pindexPrev)
# 3510 : 118700 : {
# 3511 [ + + ]: 118700 : const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
# 3512 : :
# 3513 : : // Enforce BIP113 (Median Time Past).
# 3514 : 118700 : bool enforce_locktime_median_time_past{false};
# 3515 [ + + ]: 118700 : if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CSV)) {
# 3516 : 117179 : assert(pindexPrev != nullptr);
# 3517 : 0 : enforce_locktime_median_time_past = true;
# 3518 : 117179 : }
# 3519 : :
# 3520 [ + + ]: 118700 : const int64_t nLockTimeCutoff{enforce_locktime_median_time_past ?
# 3521 : 117179 : pindexPrev->GetMedianTimePast() :
# 3522 : 118700 : block.GetBlockTime()};
# 3523 : :
# 3524 : : // Check that all transactions are finalized
# 3525 [ + + ]: 177366 : for (const auto& tx : block.vtx) {
# 3526 [ + + ]: 177366 : if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
# 3527 : 7 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
# 3528 : 7 : }
# 3529 : 177366 : }
# 3530 : :
# 3531 : : // Enforce rule that the coinbase starts with serialized block height
# 3532 [ + + ]: 118693 : if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB))
# 3533 : 118426 : {
# 3534 : 118426 : CScript expect = CScript() << nHeight;
# 3535 [ + + ]: 118426 : if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
# 3536 [ - + ]: 118426 : !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
# 3537 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
# 3538 : 1 : }
# 3539 : 118426 : }
# 3540 : :
# 3541 : : // Validation for witness commitments.
# 3542 : : // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
# 3543 : : // coinbase (where 0x0000....0000 is used instead).
# 3544 : : // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
# 3545 : : // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
# 3546 : : // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
# 3547 : : // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
# 3548 : : // multiple, the last one is used.
# 3549 : 118692 : bool fHaveWitness = false;
# 3550 [ + + ]: 118692 : if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_SEGWIT)) {
# 3551 : 116535 : int commitpos = GetWitnessCommitmentIndex(block);
# 3552 [ + + ]: 116535 : if (commitpos != NO_WITNESS_COMMITMENT) {
# 3553 : 110093 : bool malleated = false;
# 3554 : 110093 : uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
# 3555 : : // The malleation check is ignored; as the transaction tree itself
# 3556 : : // already does not permit it, it is impossible to trigger in the
# 3557 : : // witness tree.
# 3558 [ + + ][ - + ]: 110093 : if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
# 3559 : 6 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
# 3560 : 6 : }
# 3561 : 110087 : CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
# 3562 [ + + ]: 110087 : if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
# 3563 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
# 3564 : 3 : }
# 3565 : 110084 : fHaveWitness = true;
# 3566 : 110084 : }
# 3567 : 116535 : }
# 3568 : :
# 3569 : : // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
# 3570 [ + + ]: 118683 : if (!fHaveWitness) {
# 3571 [ + + ]: 31542 : for (const auto& tx : block.vtx) {
# 3572 [ + + ]: 31542 : if (tx->HasWitness()) {
# 3573 : 5 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
# 3574 : 5 : }
# 3575 : 31542 : }
# 3576 : 8599 : }
# 3577 : :
# 3578 : : // After the coinbase witness reserved value and commitment are verified,
# 3579 : : // we can check if the block weight passes (before we've checked the
# 3580 : : // coinbase witness, it would be possible for the weight to be too
# 3581 : : // large by filling up the coinbase witness, which doesn't change
# 3582 : : // the block hash, so we couldn't mark the block as permanently
# 3583 : : // failed).
# 3584 [ + + ]: 118678 : if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
# 3585 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
# 3586 : 1 : }
# 3587 : :
# 3588 : 118677 : return true;
# 3589 : 118678 : }
# 3590 : :
# 3591 : : bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, CBlockIndex** ppindex, bool min_pow_checked)
# 3592 : 160716 : {
# 3593 : 160716 : AssertLockHeld(cs_main);
# 3594 : :
# 3595 : : // Check for duplicate
# 3596 : 160716 : uint256 hash = block.GetHash();
# 3597 : 160716 : BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
# 3598 [ + + ]: 160716 : if (hash != GetConsensus().hashGenesisBlock) {
# 3599 [ + + ]: 160703 : if (miSelf != m_blockman.m_block_index.end()) {
# 3600 : : // Block header is already known.
# 3601 : 68466 : CBlockIndex* pindex = &(miSelf->second);
# 3602 [ + - ]: 68466 : if (ppindex)
# 3603 : 68466 : *ppindex = pindex;
# 3604 [ + + ]: 68466 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
# 3605 [ + - ]: 318 : LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
# 3606 : 318 : return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
# 3607 : 318 : }
# 3608 : 68148 : return true;
# 3609 : 68466 : }
# 3610 : :
# 3611 [ - + ]: 92237 : if (!CheckBlockHeader(block, state, GetConsensus())) {
# 3612 [ # # ]: 0 : LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
# 3613 : 0 : return false;
# 3614 : 0 : }
# 3615 : :
# 3616 : : // Get prev block index
# 3617 : 92237 : CBlockIndex* pindexPrev = nullptr;
# 3618 : 92237 : BlockMap::iterator mi{m_blockman.m_block_index.find(block.hashPrevBlock)};
# 3619 [ + + ]: 92237 : if (mi == m_blockman.m_block_index.end()) {
# 3620 [ + - ]: 3 : LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
# 3621 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
# 3622 : 3 : }
# 3623 : 92234 : pindexPrev = &((*mi).second);
# 3624 [ + + ]: 92234 : if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
# 3625 [ + - ]: 4 : LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
# 3626 : 4 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3627 : 4 : }
# 3628 [ + + ]: 92230 : if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev, m_adjusted_time_callback())) {
# 3629 [ + - ]: 14 : LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
# 3630 : 14 : return false;
# 3631 : 14 : }
# 3632 : :
# 3633 : : /* Determine if this block descends from any block which has been found
# 3634 : : * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
# 3635 : : * them as failed. For example:
# 3636 : : *
# 3637 : : * D3
# 3638 : : * /
# 3639 : : * B2 - C2
# 3640 : : * / \
# 3641 : : * A D2 - E2 - F2
# 3642 : : * \
# 3643 : : * B1 - C1 - D1 - E1
# 3644 : : *
# 3645 : : * In the case that we attempted to reorg from E1 to F2, only to find
# 3646 : : * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
# 3647 : : * but NOT D3 (it was not in any of our candidate sets at the time).
# 3648 : : *
# 3649 : : * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
# 3650 : : * in LoadBlockIndex.
# 3651 : : */
# 3652 [ + + ]: 92216 : if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
# 3653 : : // The above does not mean "invalid": it checks if the previous block
# 3654 : : // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
# 3655 : : // optimization, in the common case of adding a new block to the tip,
# 3656 : : // we don't need to iterate over the failed blocks list.
# 3657 [ + + ]: 50378 : for (const CBlockIndex* failedit : m_failed_blocks) {
# 3658 [ + + ]: 50378 : if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
# 3659 : 1 : assert(failedit->nStatus & BLOCK_FAILED_VALID);
# 3660 : 0 : CBlockIndex* invalid_walk = pindexPrev;
# 3661 [ + + ]: 2 : while (invalid_walk != failedit) {
# 3662 : 1 : invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
# 3663 : 1 : m_blockman.m_dirty_blockindex.insert(invalid_walk);
# 3664 : 1 : invalid_walk = invalid_walk->pprev;
# 3665 : 1 : }
# 3666 [ + - ]: 1 : LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
# 3667 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3668 : 1 : }
# 3669 : 50378 : }
# 3670 : 40608 : }
# 3671 : 92216 : }
# 3672 [ + + ]: 92228 : if (!min_pow_checked) {
# 3673 [ + - ]: 1 : LogPrint(BCLog::VALIDATION, "%s: not adding new block header %s, missing anti-dos proof-of-work validation\n", __func__, hash.ToString());
# 3674 : 1 : return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK, "too-little-chainwork");
# 3675 : 1 : }
# 3676 : 92227 : CBlockIndex* pindex{m_blockman.AddToBlockIndex(block, m_best_header)};
# 3677 : :
# 3678 [ + - ]: 92227 : if (ppindex)
# 3679 : 92227 : *ppindex = pindex;
# 3680 : :
# 3681 : 92227 : return true;
# 3682 : 92228 : }
# 3683 : :
# 3684 : : // Exposed wrapper for AcceptBlockHeader
# 3685 : : bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex)
# 3686 : 25999 : {
# 3687 : 25999 : AssertLockNotHeld(cs_main);
# 3688 : 25999 : {
# 3689 : 25999 : LOCK(cs_main);
# 3690 [ + + ]: 67213 : for (const CBlockHeader& header : headers) {
# 3691 : 67213 : CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
# 3692 : 67213 : bool accepted{AcceptBlockHeader(header, state, &pindex, min_pow_checked)};
# 3693 : 67213 : ActiveChainstate().CheckBlockIndex();
# 3694 : :
# 3695 [ + + ]: 67213 : if (!accepted) {
# 3696 : 25 : return false;
# 3697 : 25 : }
# 3698 [ + + ]: 67188 : if (ppindex) {
# 3699 : 66282 : *ppindex = pindex;
# 3700 : 66282 : }
# 3701 : 67188 : }
# 3702 : 25999 : }
# 3703 [ + + ]: 25974 : if (NotifyHeaderTip(ActiveChainstate())) {
# 3704 [ + + ][ + + ]: 18020 : if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
# [ + - ]
# 3705 : 400 : const CBlockIndex& last_accepted{**ppindex};
# 3706 : 400 : const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / GetConsensus().nPowTargetSpacing};
# 3707 : 400 : const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)};
# 3708 : 400 : LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress);
# 3709 : 400 : }
# 3710 : 18020 : }
# 3711 : 25974 : return true;
# 3712 : 25999 : }
# 3713 : :
# 3714 : : void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp)
# 3715 : 11 : {
# 3716 : 11 : AssertLockNotHeld(cs_main);
# 3717 : 11 : const auto& chainstate = ActiveChainstate();
# 3718 : 11 : {
# 3719 : 11 : LOCK(cs_main);
# 3720 : : // Don't report headers presync progress if we already have a post-minchainwork header chain.
# 3721 : : // This means we lose reporting for potentially legimate, but unlikely, deep reorgs, but
# 3722 : : // prevent attackers that spam low-work headers from filling our logs.
# 3723 [ + - ]: 11 : if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return;
# 3724 : : // Rate limit headers presync updates to 4 per second, as these are not subject to DoS
# 3725 : : // protection.
# 3726 : 0 : auto now = std::chrono::steady_clock::now();
# 3727 [ # # ]: 0 : if (now < m_last_presync_update + std::chrono::milliseconds{250}) return;
# 3728 : 0 : m_last_presync_update = now;
# 3729 : 0 : }
# 3730 : 0 : bool initial_download = chainstate.IsInitialBlockDownload();
# 3731 : 0 : uiInterface.NotifyHeaderTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true);
# 3732 [ # # ]: 0 : if (initial_download) {
# 3733 : 0 : const int64_t blocks_left{(GetTime() - timestamp) / GetConsensus().nPowTargetSpacing};
# 3734 : 0 : const double progress{100.0 * height / (height + blocks_left)};
# 3735 : 0 : LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
# 3736 : 0 : }
# 3737 : 0 : }
# 3738 : :
# 3739 : : /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
# 3740 : : bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock, bool min_pow_checked)
# 3741 : 93503 : {
# 3742 : 93503 : const CBlock& block = *pblock;
# 3743 : :
# 3744 [ + + ]: 93503 : if (fNewBlock) *fNewBlock = false;
# 3745 : 93503 : AssertLockHeld(cs_main);
# 3746 : :
# 3747 : 93503 : CBlockIndex *pindexDummy = nullptr;
# 3748 [ + + ]: 93503 : CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
# 3749 : :
# 3750 : 93503 : bool accepted_header{m_chainman.AcceptBlockHeader(block, state, &pindex, min_pow_checked)};
# 3751 : 93503 : CheckBlockIndex();
# 3752 : :
# 3753 [ + + ]: 93503 : if (!accepted_header)
# 3754 : 316 : return false;
# 3755 : :
# 3756 : : // Try to process all requested blocks that we don't have, but only
# 3757 : : // process an unrequested block if it's new and has enough work to
# 3758 : : // advance our tip, and isn't too many blocks ahead.
# 3759 : 93187 : bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
# 3760 [ + + ]: 93187 : bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
# 3761 : : // Blocks that are too out-of-order needlessly limit the effectiveness of
# 3762 : : // pruning, because pruning will not delete block files that contain any
# 3763 : : // blocks which are too close in height to the tip. Apply this test
# 3764 : : // regardless of whether pruning is enabled; it should generally be safe to
# 3765 : : // not process unrequested blocks.
# 3766 : 93187 : bool fTooFarAhead{pindex->nHeight > m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
# 3767 : :
# 3768 : : // TODO: Decouple this function from the block download logic by removing fRequested
# 3769 : : // This requires some new chain data structure to efficiently look up if a
# 3770 : : // block is in a chain leading to a candidate for best tip, despite not
# 3771 : : // being such a candidate itself.
# 3772 : : // Note that this would break the getblockfrompeer RPC
# 3773 : :
# 3774 : : // TODO: deal better with return value and error conditions for duplicate
# 3775 : : // and unrequested blocks.
# 3776 [ + + ]: 93187 : if (fAlreadyHave) return true;
# 3777 [ + + ]: 82752 : if (!fRequested) { // If we didn't ask for it:
# 3778 [ - + ]: 615 : if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
# 3779 [ + + ]: 615 : if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
# 3780 [ + + ]: 610 : if (fTooFarAhead) return true; // Block height is too high
# 3781 : :
# 3782 : : // Protect against DoS attacks from low-work chains.
# 3783 : : // If our tip is behind, a peer could try to send us
# 3784 : : // low-work blocks on a fake chain that we would never
# 3785 : : // request; don't process these.
# 3786 [ - + ]: 609 : if (pindex->nChainWork < nMinimumChainWork) return true;
# 3787 : 609 : }
# 3788 : :
# 3789 [ - + ]: 82746 : if (!CheckBlock(block, state, m_params.GetConsensus()) ||
# 3790 [ + + ]: 82746 : !ContextualCheckBlock(block, state, m_chainman, pindex->pprev)) {
# 3791 [ + - ][ + + ]: 22 : if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 3792 : 8 : pindex->nStatus |= BLOCK_FAILED_VALID;
# 3793 : 8 : m_blockman.m_dirty_blockindex.insert(pindex);
# 3794 : 8 : }
# 3795 : 22 : return error("%s: %s", __func__, state.ToString());
# 3796 : 22 : }
# 3797 : :
# 3798 : : // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
# 3799 : : // (but if it does not build on our best tip, let the SendMessages loop relay it)
# 3800 [ + + ][ + + ]: 82724 : if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
# 3801 : 66910 : GetMainSignals().NewPoWValidBlock(pindex, pblock);
# 3802 : :
# 3803 : : // Write block to history file
# 3804 [ + + ]: 82724 : if (fNewBlock) *fNewBlock = true;
# 3805 : 82724 : try {
# 3806 : 82724 : FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, m_params, dbp)};
# 3807 [ - + ]: 82724 : if (blockPos.IsNull()) {
# 3808 : 0 : state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
# 3809 : 0 : return false;
# 3810 : 0 : }
# 3811 : 82724 : ReceivedBlockTransactions(block, pindex, blockPos);
# 3812 : 82724 : } catch (const std::runtime_error& e) {
# 3813 : 0 : return AbortNode(state, std::string("System error: ") + e.what());
# 3814 : 0 : }
# 3815 : :
# 3816 : 82724 : FlushStateToDisk(state, FlushStateMode::NONE);
# 3817 : :
# 3818 : 82724 : CheckBlockIndex();
# 3819 : :
# 3820 : 82724 : return true;
# 3821 : 82724 : }
# 3822 : :
# 3823 : : bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked, bool* new_block)
# 3824 : 92518 : {
# 3825 : 92518 : AssertLockNotHeld(cs_main);
# 3826 : :
# 3827 : 92518 : {
# 3828 : 92518 : CBlockIndex *pindex = nullptr;
# 3829 [ + + ]: 92518 : if (new_block) *new_block = false;
# 3830 : 92518 : BlockValidationState state;
# 3831 : :
# 3832 : : // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
# 3833 : : // Therefore, the following critical section must include the CheckBlock() call as well.
# 3834 : 92518 : LOCK(cs_main);
# 3835 : :
# 3836 : : // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if
# 3837 : : // CheckBlock() fails. This is protective against consensus failure if there are any unknown forms of block
# 3838 : : // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and
# 3839 : : // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html. Because CheckBlock() is
# 3840 : : // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial.
# 3841 : 92518 : bool ret = CheckBlock(*block, state, GetConsensus());
# 3842 [ + + ]: 92518 : if (ret) {
# 3843 : : // Store to disk
# 3844 : 91949 : ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block, min_pow_checked);
# 3845 : 91949 : }
# 3846 [ + + ]: 92518 : if (!ret) {
# 3847 : 903 : GetMainSignals().BlockChecked(*block, state);
# 3848 : 903 : return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
# 3849 : 903 : }
# 3850 : 92518 : }
# 3851 : :
# 3852 : 91615 : NotifyHeaderTip(ActiveChainstate());
# 3853 : :
# 3854 : 91615 : BlockValidationState state; // Only used to report errors, not invalidity - ignore it
# 3855 [ + + ]: 91615 : if (!ActiveChainstate().ActivateBestChain(state, block)) {
# 3856 : 1 : return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
# 3857 : 1 : }
# 3858 : :
# 3859 : 91614 : return true;
# 3860 : 91615 : }
# 3861 : :
# 3862 : : MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef& tx, bool test_accept)
# 3863 : 29100 : {
# 3864 : 29100 : AssertLockHeld(cs_main);
# 3865 : 29100 : CChainState& active_chainstate = ActiveChainstate();
# 3866 [ - + ]: 29100 : if (!active_chainstate.GetMempool()) {
# 3867 : 0 : TxValidationState state;
# 3868 : 0 : state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
# 3869 : 0 : return MempoolAcceptResult::Failure(state);
# 3870 : 0 : }
# 3871 : 29100 : auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(), /*bypass_limits=*/ false, test_accept);
# 3872 : 29100 : active_chainstate.GetMempool()->check(active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
# 3873 : 29100 : return result;
# 3874 : 29100 : }
# 3875 : :
# 3876 : : bool TestBlockValidity(BlockValidationState& state,
# 3877 : : const CChainParams& chainparams,
# 3878 : : CChainState& chainstate,
# 3879 : : const CBlock& block,
# 3880 : : CBlockIndex* pindexPrev,
# 3881 : : const std::function<NodeClock::time_point()>& adjusted_time_callback,
# 3882 : : bool fCheckPOW,
# 3883 : : bool fCheckMerkleRoot)
# 3884 : 35964 : {
# 3885 : 35964 : AssertLockHeld(cs_main);
# 3886 : 35964 : assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
# 3887 : 0 : CCoinsViewCache viewNew(&chainstate.CoinsTip());
# 3888 : 35964 : uint256 block_hash(block.GetHash());
# 3889 : 35964 : CBlockIndex indexDummy(block);
# 3890 : 35964 : indexDummy.pprev = pindexPrev;
# 3891 : 35964 : indexDummy.nHeight = pindexPrev->nHeight + 1;
# 3892 : 35964 : indexDummy.phashBlock = &block_hash;
# 3893 : :
# 3894 : : // NOTE: CheckBlockHeader is called by CheckBlock
# 3895 [ + + ]: 35964 : if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, adjusted_time_callback()))
# 3896 : 3 : return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
# 3897 [ + + ]: 35961 : if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
# 3898 : 7 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 3899 [ + + ]: 35954 : if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev))
# 3900 : 1 : return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
# 3901 [ + + ]: 35953 : if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
# 3902 : 8 : return false;
# 3903 : 8 : }
# 3904 : 35945 : assert(state.IsValid());
# 3905 : :
# 3906 : 0 : return true;
# 3907 : 35953 : }
# 3908 : :
# 3909 : : /* This function is called from the RPC code for pruneblockchain */
# 3910 : : void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
# 3911 : 0 : {
# 3912 : 0 : BlockValidationState state;
# 3913 [ # # ]: 0 : if (!active_chainstate.FlushStateToDisk(
# 3914 : 0 : state, FlushStateMode::NONE, nManualPruneHeight)) {
# 3915 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 3916 : 0 : }
# 3917 : 0 : }
# 3918 : :
# 3919 : : void CChainState::LoadMempool(const fs::path& load_path, FopenFn mockable_fopen_function)
# 3920 : 741 : {
# 3921 [ - + ]: 741 : if (!m_mempool) return;
# 3922 : 741 : ::LoadMempool(*m_mempool, load_path, *this, mockable_fopen_function);
# 3923 : 741 : m_mempool->SetLoadTried(!ShutdownRequested());
# 3924 : 741 : }
# 3925 : :
# 3926 : : bool CChainState::LoadChainTip()
# 3927 : 474 : {
# 3928 : 474 : AssertLockHeld(cs_main);
# 3929 : 474 : const CCoinsViewCache& coins_cache = CoinsTip();
# 3930 : 474 : assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
# 3931 : 0 : const CBlockIndex* tip = m_chain.Tip();
# 3932 : :
# 3933 [ + + ][ + + ]: 474 : if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
# [ + - ]
# 3934 : 2 : return true;
# 3935 : 2 : }
# 3936 : :
# 3937 : : // Load pointer to end of best chain
# 3938 : 472 : CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
# 3939 [ - + ]: 472 : if (!pindex) {
# 3940 : 0 : return false;
# 3941 : 0 : }
# 3942 : 472 : m_chain.SetTip(*pindex);
# 3943 : 472 : PruneBlockIndexCandidates();
# 3944 : :
# 3945 : 472 : tip = m_chain.Tip();
# 3946 : 472 : LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
# 3947 : 472 : tip->GetBlockHash().ToString(),
# 3948 : 472 : m_chain.Height(),
# 3949 : 472 : FormatISO8601DateTime(tip->GetBlockTime()),
# 3950 : 472 : GuessVerificationProgress(m_params.TxData(), tip));
# 3951 : 472 : return true;
# 3952 : 472 : }
# 3953 : :
# 3954 : : CVerifyDB::CVerifyDB()
# 3955 : 471 : {
# 3956 : 471 : uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
# 3957 : 471 : }
# 3958 : :
# 3959 : : CVerifyDB::~CVerifyDB()
# 3960 : 471 : {
# 3961 : 471 : uiInterface.ShowProgress("", 100, false);
# 3962 : 471 : }
# 3963 : :
# 3964 : : bool CVerifyDB::VerifyDB(
# 3965 : : CChainState& chainstate,
# 3966 : : const Consensus::Params& consensus_params,
# 3967 : : CCoinsView& coinsview,
# 3968 : : int nCheckLevel, int nCheckDepth)
# 3969 : 471 : {
# 3970 : 471 : AssertLockHeld(cs_main);
# 3971 : :
# 3972 [ - + ][ + + ]: 471 : if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) {
# 3973 : 71 : return true;
# 3974 : 71 : }
# 3975 : :
# 3976 : : // Verify blocks in the best chain
# 3977 [ + + ][ + + ]: 400 : if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
# 3978 : 20 : nCheckDepth = chainstate.m_chain.Height();
# 3979 : 20 : }
# 3980 : 400 : nCheckLevel = std::max(0, std::min(4, nCheckLevel));
# 3981 : 400 : LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
# 3982 : 400 : CCoinsViewCache coins(&coinsview);
# 3983 : 400 : CBlockIndex* pindex;
# 3984 : 400 : CBlockIndex* pindexFailure = nullptr;
# 3985 : 400 : int nGoodTransactions = 0;
# 3986 : 400 : BlockValidationState state;
# 3987 : 400 : int reportDone = 0;
# 3988 : 400 : LogPrintf("[0%%]..."); /* Continued */
# 3989 : :
# 3990 : 400 : const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
# 3991 : :
# 3992 [ + - ][ + + ]: 3175 : for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
# 3993 [ + + ]: 3155 : const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
# 3994 [ + + ]: 3155 : if (reportDone < percentageDone / 10) {
# 3995 : : // report every 10% step
# 3996 : 2283 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 3997 : 2283 : reportDone = percentageDone / 10;
# 3998 : 2283 : }
# 3999 : 3155 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 4000 [ + + ]: 3155 : if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
# 4001 : 377 : break;
# 4002 : 377 : }
# 4003 [ + + ][ + - ]: 2778 : if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
# [ - + ]
# 4004 : : // If pruning or running under an assumeutxo snapshot, only go
# 4005 : : // back as far as we have data.
# 4006 : 0 : LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
# 4007 : 0 : break;
# 4008 : 0 : }
# 4009 : 2778 : CBlock block;
# 4010 : : // check level 0: read from disk
# 4011 [ - + ]: 2778 : if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
# 4012 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4013 : 0 : }
# 4014 : : // check level 1: verify block validity
# 4015 [ + - ][ - + ]: 2778 : if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) {
# 4016 : 0 : return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
# 4017 : 0 : pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 4018 : 0 : }
# 4019 : : // check level 2: verify undo validity
# 4020 [ + - ][ + - ]: 2778 : if (nCheckLevel >= 2 && pindex) {
# 4021 : 2778 : CBlockUndo undo;
# 4022 [ + - ]: 2778 : if (!pindex->GetUndoPos().IsNull()) {
# 4023 [ + + ]: 2778 : if (!UndoReadFromDisk(undo, pindex)) {
# 4024 : 1 : return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4025 : 1 : }
# 4026 : 2778 : }
# 4027 : 2778 : }
# 4028 : : // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
# 4029 : 2777 : size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
# 4030 : :
# 4031 [ + - ][ + - ]: 2777 : if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
# 4032 : 2777 : assert(coins.GetBestBlock() == pindex->GetBlockHash());
# 4033 : 0 : DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
# 4034 [ - + ]: 2777 : if (res == DISCONNECT_FAILED) {
# 4035 : 0 : return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4036 : 0 : }
# 4037 [ - + ]: 2777 : if (res == DISCONNECT_UNCLEAN) {
# 4038 : 0 : nGoodTransactions = 0;
# 4039 : 0 : pindexFailure = pindex;
# 4040 : 2777 : } else {
# 4041 : 2777 : nGoodTransactions += block.vtx.size();
# 4042 : 2777 : }
# 4043 : 2777 : }
# 4044 [ + + ]: 2777 : if (ShutdownRequested()) return true;
# 4045 : 2777 : }
# 4046 [ - + ]: 397 : if (pindexFailure) {
# 4047 : 0 : return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
# 4048 : 0 : }
# 4049 : :
# 4050 : : // store block count as we move pindex at check level >= 4
# 4051 : 397 : int block_count = chainstate.m_chain.Height() - pindex->nHeight;
# 4052 : :
# 4053 : : // check level 4: try reconnecting blocks
# 4054 [ + + ]: 397 : if (nCheckLevel >= 4) {
# 4055 [ + + ]: 288 : while (pindex != chainstate.m_chain.Tip()) {
# 4056 : 287 : const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
# 4057 [ + + ]: 287 : if (reportDone < percentageDone / 10) {
# 4058 : : // report every 10% step
# 4059 : 5 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 4060 : 5 : reportDone = percentageDone / 10;
# 4061 : 5 : }
# 4062 : 287 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 4063 : 287 : pindex = chainstate.m_chain.Next(pindex);
# 4064 : 287 : CBlock block;
# 4065 [ - + ]: 287 : if (!ReadBlockFromDisk(block, pindex, consensus_params))
# 4066 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4067 [ - + ]: 287 : if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
# 4068 : 0 : return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 4069 : 0 : }
# 4070 [ - + ]: 287 : if (ShutdownRequested()) return true;
# 4071 : 287 : }
# 4072 : 1 : }
# 4073 : :
# 4074 : 397 : LogPrintf("[DONE].\n");
# 4075 : 397 : LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
# 4076 : :
# 4077 : 397 : return true;
# 4078 : 397 : }
# 4079 : :
# 4080 : : /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
# 4081 : : bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs)
# 4082 : 0 : {
# 4083 : 0 : AssertLockHeld(cs_main);
# 4084 : : // TODO: merge with ConnectBlock
# 4085 : 0 : CBlock block;
# 4086 [ # # ]: 0 : if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
# 4087 : 0 : return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4088 : 0 : }
# 4089 : :
# 4090 [ # # ]: 0 : for (const CTransactionRef& tx : block.vtx) {
# 4091 [ # # ]: 0 : if (!tx->IsCoinBase()) {
# 4092 [ # # ]: 0 : for (const CTxIn &txin : tx->vin) {
# 4093 : 0 : inputs.SpendCoin(txin.prevout);
# 4094 : 0 : }
# 4095 : 0 : }
# 4096 : : // Pass check = true as every addition may be an overwrite.
# 4097 : 0 : AddCoins(inputs, *tx, pindex->nHeight, true);
# 4098 : 0 : }
# 4099 : 0 : return true;
# 4100 : 0 : }
# 4101 : :
// Recover from an interrupted coins-db flush: if the db records two "head"
// blocks (old tip and new tip of the partially-applied flush), roll the UTXO
// set back along the old branch and forward along the new one until it is
// consistent with the recorded new tip. Returns false only on unrecoverable
// inconsistency or missing block data.
bool CChainState::ReplayBlocks()
{
    LOCK(cs_main);

    CCoinsView& db = this->CoinsDB();
    CCoinsViewCache cache(&db);

    // GetHeadBlocks() is non-empty only when a previous flush was interrupted.
    std::vector<uint256> hashHeads = db.GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");

    uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
    LogPrintf("Replaying blocks\n");

    const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew; // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.

    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);

    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }

    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }

    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex& pindex{*Assert(pindexNew->GetAncestor(nHeight))};

        LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(), nHeight);
        uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
        // RollforwardBlock re-applies the block's spends/outputs idempotently.
        if (!RollforwardBlock(&pindex, cache)) return false;
    }

    // Persist the repaired view and clear the progress indicator.
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
# 4169 : :
# 4170 : : bool CChainState::NeedsRedownload() const
# 4171 : 947 : {
# 4172 : 947 : AssertLockHeld(cs_main);
# 4173 : :
# 4174 : : // At and above m_params.SegwitHeight, segwit consensus rules must be validated
# 4175 : 947 : CBlockIndex* block{m_chain.Tip()};
# 4176 : :
# 4177 [ + + ][ + + ]: 73722 : while (block != nullptr && DeploymentActiveAt(*block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
# 4178 [ + + ]: 72776 : if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
# 4179 : : // block is insufficiently validated for a segwit client
# 4180 : 1 : return true;
# 4181 : 1 : }
# 4182 : 72775 : block = block->pprev;
# 4183 : 72775 : }
# 4184 : :
# 4185 : 946 : return false;
# 4186 : 947 : }
# 4187 : :
// Reset this chainstate's per-run block-index bookkeeping: restart the
// block-arrival sequence counter and drop all tip candidates.
void CChainState::UnloadBlockIndex()
{
    AssertLockHeld(::cs_main);
    nBlockSequenceId = 1;
    setBlockIndexCandidates.clear();
}
# 4194 : :
// Load the block index from disk (unless reindexing) and populate each
// chainstate's tip-candidate set, best-invalid and best-header pointers.
// Returns false on database failure or shutdown request.
bool ChainstateManager::LoadBlockIndex()
{
    AssertLockHeld(cs_main);
    // Load block index from databases
    bool needs_init = fReindex;
    if (!fReindex) {
        bool ret = m_blockman.LoadBlockIndexDB(GetConsensus());
        if (!ret) return false;

        // Process entries parent-before-child by sorting on height.
        std::vector<CBlockIndex*> vSortedByHeight{m_blockman.GetAllBlockIndices()};
        std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
                  CBlockIndexHeightOnlyComparator());

        // Find start of assumed-valid region.
        int first_assumed_valid_height = std::numeric_limits<int>::max();

        for (const CBlockIndex* block : vSortedByHeight) {
            if (block->IsAssumedValid()) {
                auto chainstates = GetAll();

                // If we encounter an assumed-valid block index entry, ensure that we have
                // one chainstate that tolerates assumed-valid entries and another that does
                // not (i.e. the background validation chainstate), since assumed-valid
                // entries should always be pending validation by a fully-validated chainstate.
                auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
                assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
                assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));

                // Sorted by height, so the first match is the lowest such block.
                first_assumed_valid_height = block->nHeight;
                break;
            }
        }

        for (CBlockIndex* pindex : vSortedByHeight) {
            if (ShutdownRequested()) return false;
            if (pindex->IsAssumedValid() ||
                    (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                     (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {

                // Fill each chainstate's block candidate set. Only add assumed-valid
                // blocks to the tip candidate set if the chainstate is allowed to rely on
                // assumed-valid blocks.
                //
                // If all setBlockIndexCandidates contained the assumed-valid blocks, the
                // background chainstate's ActivateBestChain() call would add assumed-valid
                // blocks to the chain (based on how FindMostWorkChain() works). Obviously
                // we don't want this since the purpose of the background validation chain
                // is to validate assumed-valid blocks.
                //
                // Note: This is considering all blocks whose height is greater or equal to
                // the first assumed-valid block to be assumed-valid blocks, and excluding
                // them from the background chainstate's setBlockIndexCandidates set. This
                // does mean that some blocks which are not technically assumed-valid
                // (later blocks on a fork beginning before the first assumed-valid block)
                // might not get added to the background chainstate, but this is ok,
                // because they will still be attached to the active chainstate if they
                // actually contain more work.
                //
                // Instead of this height-based approach, an earlier attempt was made at
                // detecting "holistically" whether the block index under consideration
                // relied on an assumed-valid ancestor, but this proved to be too slow to
                // be practical.
                for (CChainState* chainstate : GetAll()) {
                    if (chainstate->reliesOnAssumedValid() ||
                            pindex->nHeight < first_assumed_valid_height) {
                        chainstate->setBlockIndexCandidates.insert(pindex);
                    }
                }
            }
            // Track the invalid block with the most work, and the best header.
            if (pindex->nStatus & BLOCK_FAILED_MASK && (!m_best_invalid || pindex->nChainWork > m_best_invalid->nChainWork)) {
                m_best_invalid = pindex;
            }
            if (pindex->IsValid(BLOCK_VALID_TREE) && (m_best_header == nullptr || CBlockIndexWorkComparator()(m_best_header, pindex)))
                m_best_header = pindex;
        }

        needs_init = m_blockman.m_block_index.empty();
    }

    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.

        LogPrintf("Initializing databases...\n");
    }
    return true;
}
# 4285 : :
# 4286 : : bool CChainState::LoadGenesisBlock()
# 4287 : 962 : {
# 4288 : 962 : LOCK(cs_main);
# 4289 : :
# 4290 : : // Check whether we're already initialized by checking for genesis in
# 4291 : : // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
# 4292 : : // set based on the coins db, not the block index db, which is the only
# 4293 : : // thing loaded at this point.
# 4294 [ + + ]: 962 : if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash()))
# 4295 : 486 : return true;
# 4296 : :
# 4297 : 476 : try {
# 4298 : 476 : const CBlock& block = m_params.GenesisBlock();
# 4299 : 476 : FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, m_params, nullptr)};
# 4300 [ - + ]: 476 : if (blockPos.IsNull()) {
# 4301 : 0 : return error("%s: writing genesis block to disk failed", __func__);
# 4302 : 0 : }
# 4303 : 476 : CBlockIndex* pindex = m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
# 4304 : 476 : ReceivedBlockTransactions(block, pindex, blockPos);
# 4305 : 476 : } catch (const std::runtime_error& e) {
# 4306 : 0 : return error("%s: failed to write genesis block: %s", __func__, e.what());
# 4307 : 0 : }
# 4308 : :
# 4309 : 476 : return true;
# 4310 : 476 : }
# 4311 : :
// Import blocks from an external file (-reindex or -loadblock): scan for
// network-magic-delimited block records, accept each block, and recursively
// process previously-seen children once their parent arrives.
//
// @param fileIn  file to read; ownership transfers to the internal
//                CBufferedFile, which closes it.
// @param dbp     position of the file on disk for -reindex; nullptr for -loadblock.
// @param blocks_with_unknown_parent  out-of-order blocks keyed by the parent
//                hash they are waiting for; must be non-null iff dbp is.
void CChainState::LoadExternalBlockFile(
    FILE* fileIn,
    FlatFilePos* dbp,
    std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent)
{
    AssertLockNotHeld(m_chainstate_mutex);

    // Either both should be specified (-reindex), or neither (-loadblock).
    assert(!dbp == !blocks_with_unknown_parent);

    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            if (ShutdownRequested()) return;

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(m_params.MessageStart()[0]);
                nRewind = blkdat.GetPos() + 1;
                blkdat >> buf;
                if (memcmp(buf, m_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
                    continue;
                }
                // read size
                blkdat >> nSize;
                // Reject sizes smaller than a bare header or above the maximum.
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                 block.hashPrevBlock.ToString());
                        if (dbp && blocks_with_unknown_parent) {
                            blocks_with_unknown_parent->emplace(block.hashPrevBlock, *dbp);
                        }
                        continue;
                    }

                    // process in case the block isn't known yet
                    const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                        BlockValidationState state;
                        if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr, true)) {
                            nLoaded++;
                        }
                        if (state.IsError()) {
                            break;
                        }
                    } else if (hash != m_params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                        LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == m_params.GetConsensus().hashGenesisBlock) {
                    BlockValidationState state;
                    if (!ActivateBestChain(state, nullptr)) {
                        break;
                    }
                }

                NotifyHeaderTip(*this);

                if (!blocks_with_unknown_parent) continue;

                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    auto range = blocks_with_unknown_parent->equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, FlatFilePos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, m_params.GetConsensus())) {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                     head.ToString());
                            LOCK(cs_main);
                            BlockValidationState dummy;
                            if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr, true)) {
                                nLoaded++;
                                // The accepted child may itself unblock further descendants.
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        blocks_with_unknown_parent->erase(it);
                        NotifyHeaderTip(*this);
                    }
                }
            } catch (const std::exception& e) {
                // Bad record: resume scanning for the next magic marker.
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
}
# 4438 : :
# 4439 : : void CChainState::CheckBlockIndex()
# 4440 : 314199 : {
# 4441 [ + + ]: 314199 : if (!fCheckBlockIndex) {
# 4442 : 98363 : return;
# 4443 : 98363 : }
# 4444 : :
# 4445 : 215836 : LOCK(cs_main);
# 4446 : :
# 4447 : : // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
# 4448 : : // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
# 4449 : : // tests when iterating the block tree require that m_chain has been initialized.)
# 4450 [ + + ]: 215836 : if (m_chain.Height() < 0) {
# 4451 : 20 : assert(m_blockman.m_block_index.size() <= 1);
# 4452 : 0 : return;
# 4453 : 20 : }
# 4454 : :
# 4455 : : // Build forward-pointing map of the entire block tree.
# 4456 : 215816 : std::multimap<CBlockIndex*,CBlockIndex*> forward;
# 4457 [ + + ]: 94893572 : for (auto& [_, block_index] : m_blockman.m_block_index) {
# 4458 : 94893572 : forward.emplace(block_index.pprev, &block_index);
# 4459 : 94893572 : }
# 4460 : :
# 4461 : 215816 : assert(forward.size() == m_blockman.m_block_index.size());
# 4462 : :
# 4463 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
# 4464 : 215816 : CBlockIndex *pindex = rangeGenesis.first->second;
# 4465 : 215816 : rangeGenesis.first++;
# 4466 : 215816 : assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
# 4467 : :
# 4468 : : // Iterate over the entire block tree, using depth-first search.
# 4469 : : // Along the way, remember whether there are blocks on the path from genesis
# 4470 : : // block being explored which are the first to have certain properties.
# 4471 : 0 : size_t nNodes = 0;
# 4472 : 215816 : int nHeight = 0;
# 4473 : 215816 : CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
# 4474 : 215816 : CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
# 4475 : 215816 : CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
# 4476 : 215816 : CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
# 4477 : 215816 : CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
# 4478 : 215816 : CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
# 4479 : 215816 : CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
# 4480 [ + + ]: 95109388 : while (pindex != nullptr) {
# 4481 : 94893572 : nNodes++;
# 4482 [ + + ][ + + ]: 94893572 : if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
# 4483 : : // Assumed-valid index entries will not have data since we haven't downloaded the
# 4484 : : // full block yet.
# 4485 [ + + ][ + + ]: 94893572 : if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA) && !pindex->IsAssumedValid()) {
# [ + - ]
# 4486 : 431747 : pindexFirstMissing = pindex;
# 4487 : 431747 : }
# 4488 [ + + ][ + + ]: 94893572 : if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
# 4489 [ + + ][ + - ]: 94893572 : if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
# [ - + ]
# 4490 : :
# 4491 [ + + ][ + - ]: 94893572 : if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
# 4492 : : // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these
# 4493 : : // *_VALID_MASK flags will not be present for index entries we are temporarily assuming
# 4494 : : // valid.
# 4495 [ + + ]: 94677756 : if (pindexFirstNotTransactionsValid == nullptr &&
# 4496 [ + + ]: 94677756 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) {
# 4497 : 431747 : pindexFirstNotTransactionsValid = pindex;
# 4498 : 431747 : }
# 4499 : :
# 4500 [ + + ]: 94677756 : if (pindexFirstNotChainValid == nullptr &&
# 4501 [ + + ]: 94677756 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) {
# 4502 : 12096385 : pindexFirstNotChainValid = pindex;
# 4503 : 12096385 : }
# 4504 : :
# 4505 [ + + ]: 94677756 : if (pindexFirstNotScriptsValid == nullptr &&
# 4506 [ + + ]: 94677756 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) {
# 4507 : 12096385 : pindexFirstNotScriptsValid = pindex;
# 4508 : 12096385 : }
# 4509 : 94677756 : }
# 4510 : :
# 4511 : : // Begin: actual consistency checks.
# 4512 [ + + ]: 94893572 : if (pindex->pprev == nullptr) {
# 4513 : : // Genesis block checks.
# 4514 : 215816 : assert(pindex->GetBlockHash() == m_params.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
# 4515 : 0 : assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
# 4516 : 215816 : }
# 4517 [ + + ]: 94893572 : if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
# 4518 : : // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
# 4519 : : // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
# 4520 : : // Unless these indexes are assumed valid and pending block download on a
# 4521 : : // background chainstate.
# 4522 [ + - ][ + - ]: 94893572 : if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
# 4523 : : // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
# 4524 : 94893572 : assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
# 4525 : 0 : assert(pindexFirstMissing == pindexFirstNeverProcessed);
# 4526 : 94893572 : } else {
# 4527 : : // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
# 4528 [ # # ]: 0 : if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
# 4529 : 0 : }
# 4530 [ + + ]: 94893572 : if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
# 4531 [ - + ]: 94893572 : if (pindex->IsAssumedValid()) {
# 4532 : : // Assumed-valid blocks should have some nTx value.
# 4533 : 0 : assert(pindex->nTx > 0);
# 4534 : : // Assumed-valid blocks should connect to the main chain.
# 4535 : 0 : assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE);
# 4536 : 94893572 : } else {
# 4537 : : // Otherwise there should only be an nTx value if we have
# 4538 : : // actually seen a block's transactions.
# 4539 : 94893572 : assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
# 4540 : 94893572 : }
# 4541 : : // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
# 4542 : 0 : assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
# 4543 : 0 : assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
# 4544 : 0 : assert(pindex->nHeight == nHeight); // nHeight must be consistent.
# 4545 : 0 : assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
# 4546 : 0 : assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
# 4547 : 0 : assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
# 4548 [ + - ]: 94893572 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
# 4549 [ + + ]: 94893572 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
# 4550 [ + + ]: 94893572 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
# 4551 [ + + ]: 94893572 : if (pindexFirstInvalid == nullptr) {
# 4552 : : // Checks for not-invalid blocks.
# 4553 : 83185386 : assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
# 4554 : 83185386 : }
# 4555 [ + + ][ + + ]: 94893572 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
# [ + + ]
# 4556 [ + + ]: 669419 : if (pindexFirstInvalid == nullptr) {
# 4557 : 579596 : const bool is_active = this == &m_chainman.ActiveChainstate();
# 4558 : :
# 4559 : : // If this block sorts at least as good as the current tip and
# 4560 : : // is valid and we have all data for its parents, it must be in
# 4561 : : // setBlockIndexCandidates. m_chain.Tip() must also be there
# 4562 : : // even if some data has been pruned.
# 4563 : : //
# 4564 : : // Don't perform this check for the background chainstate since
# 4565 : : // its setBlockIndexCandidates shouldn't have some entries (i.e. those past the
# 4566 : : // snapshot block) which do exist in the block index for the active chainstate.
# 4567 [ + + ][ + - ]: 579596 : if (is_active && (pindexFirstMissing == nullptr || pindex == m_chain.Tip())) {
# [ # # ]
# 4568 : 579589 : assert(setBlockIndexCandidates.count(pindex));
# 4569 : 579589 : }
# 4570 : : // If some parent is missing, then it could be that this block was in
# 4571 : : // setBlockIndexCandidates but had to be removed because of the missing data.
# 4572 : : // In this case it must be in m_blocks_unlinked -- see test below.
# 4573 : 579596 : }
# 4574 : 94224153 : } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
# 4575 : 94224153 : assert(setBlockIndexCandidates.count(pindex) == 0);
# 4576 : 94224153 : }
# 4577 : : // Check whether this block is in m_blocks_unlinked.
# 4578 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
# 4579 : 94893572 : bool foundInUnlinked = false;
# 4580 [ + + ]: 94894590 : while (rangeUnlinked.first != rangeUnlinked.second) {
# 4581 : 125966 : assert(rangeUnlinked.first->first == pindex->pprev);
# 4582 [ + + ]: 125966 : if (rangeUnlinked.first->second == pindex) {
# 4583 : 124948 : foundInUnlinked = true;
# 4584 : 124948 : break;
# 4585 : 124948 : }
# 4586 : 1018 : rangeUnlinked.first++;
# 4587 : 1018 : }
# 4588 [ + + ][ + + ]: 94893572 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
# [ + + ][ + - ]
# 4589 : : // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
# 4590 : 124948 : assert(foundInUnlinked);
# 4591 : 124948 : }
# 4592 [ + + ]: 94893572 : if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
# 4593 [ + + ]: 94893572 : if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
# 4594 [ + + ][ + + ]: 94893572 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
# [ + + ][ - + ]
# 4595 : : // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
# 4596 : 0 : assert(m_blockman.m_have_pruned); // We must have pruned.
# 4597 : : // This block may have entered m_blocks_unlinked if:
# 4598 : : // - it has a descendant that at some point had more work than the
# 4599 : : // tip, and
# 4600 : : // - we tried switching to that descendant but were missing
# 4601 : : // data for some intermediate block between m_chain and the
# 4602 : : // tip.
# 4603 : : // So if this block is itself better than m_chain.Tip() and it wasn't in
# 4604 : : // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
# 4605 [ # # ][ # # ]: 0 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
# [ # # ]
# 4606 [ # # ]: 0 : if (pindexFirstInvalid == nullptr) {
# 4607 : 0 : assert(foundInUnlinked);
# 4608 : 0 : }
# 4609 : 0 : }
# 4610 : 0 : }
# 4611 : : // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
# 4612 : : // End: actual consistency checks.
# 4613 : :
# 4614 : : // Try descending into the first subnode.
# 4615 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
# 4616 [ + + ]: 94893572 : if (range.first != range.second) {
# 4617 : : // A subnode was found.
# 4618 : 82617108 : pindex = range.first->second;
# 4619 : 82617108 : nHeight++;
# 4620 : 82617108 : continue;
# 4621 : 82617108 : }
# 4622 : : // This is a leaf node.
# 4623 : : // Move upwards until we reach a node of which we have not yet visited the last child.
# 4624 [ + + ]: 95109388 : while (pindex) {
# 4625 : : // We are going to either move to a parent or a sibling of pindex.
# 4626 : : // If pindex was the first with a certain property, unset the corresponding variable.
# 4627 [ + + ]: 94893572 : if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
# 4628 [ + + ]: 94893572 : if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
# 4629 [ + + ]: 94893572 : if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
# 4630 [ - + ]: 94893572 : if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
# 4631 [ + + ]: 94893572 : if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
# 4632 [ + + ]: 94893572 : if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
# 4633 [ + + ]: 94893572 : if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
# 4634 : : // Find our parent.
# 4635 : 94893572 : CBlockIndex* pindexPar = pindex->pprev;
# 4636 : : // Find which child we just visited.
# 4637 : 94893572 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
# 4638 [ + + ]: 114500148 : while (rangePar.first->second != pindex) {
# 4639 : 19606576 : assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
# 4640 : 0 : rangePar.first++;
# 4641 : 19606576 : }
# 4642 : : // Proceed to the next one.
# 4643 : 94893572 : rangePar.first++;
# 4644 [ + + ]: 94893572 : if (rangePar.first != rangePar.second) {
# 4645 : : // Move to the sibling.
# 4646 : 12060648 : pindex = rangePar.first->second;
# 4647 : 12060648 : break;
# 4648 : 82832924 : } else {
# 4649 : : // Move up further.
# 4650 : 82832924 : pindex = pindexPar;
# 4651 : 82832924 : nHeight--;
# 4652 : 82832924 : continue;
# 4653 : 82832924 : }
# 4654 : 94893572 : }
# 4655 : 12276464 : }
# 4656 : :
# 4657 : : // Check that we actually traversed the entire map.
# 4658 : 215816 : assert(nNodes == forward.size());
# 4659 : 215816 : }
# 4660 : :
# 4661 : : std::string CChainState::ToString()
# 4662 : 1019 : {
# 4663 : 1019 : AssertLockHeld(::cs_main);
# 4664 : 1019 : CBlockIndex* tip = m_chain.Tip();
# 4665 : 1019 : return strprintf("Chainstate [%s] @ height %d (%s)",
# 4666 [ + + ]: 1019 : m_from_snapshot_blockhash ? "snapshot" : "ibd",
# 4667 [ + + ][ + + ]: 1019 : tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
# 4668 : 1019 : }
# 4669 : :
# 4670 : : bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
# 4671 : 23 : {
# 4672 : 23 : AssertLockHeld(::cs_main);
# 4673 [ - + ]: 23 : if (coinstip_size == m_coinstip_cache_size_bytes &&
# 4674 [ # # ]: 23 : coinsdb_size == m_coinsdb_cache_size_bytes) {
# 4675 : : // Cache sizes are unchanged, no need to continue.
# 4676 : 0 : return true;
# 4677 : 0 : }
# 4678 : 23 : size_t old_coinstip_size = m_coinstip_cache_size_bytes;
# 4679 : 23 : m_coinstip_cache_size_bytes = coinstip_size;
# 4680 : 23 : m_coinsdb_cache_size_bytes = coinsdb_size;
# 4681 : 23 : CoinsDB().ResizeCache(coinsdb_size);
# 4682 : :
# 4683 : 23 : LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
# 4684 : 23 : this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
# 4685 : 23 : LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
# 4686 : 23 : this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
# 4687 : :
# 4688 : 23 : BlockValidationState state;
# 4689 : 23 : bool ret;
# 4690 : :
# 4691 [ + + ]: 23 : if (coinstip_size > old_coinstip_size) {
# 4692 : : // Likely no need to flush if cache sizes have grown.
# 4693 : 9 : ret = FlushStateToDisk(state, FlushStateMode::IF_NEEDED);
# 4694 : 14 : } else {
# 4695 : : // Otherwise, flush state to disk and deallocate the in-memory coins map.
# 4696 : 14 : ret = FlushStateToDisk(state, FlushStateMode::ALWAYS);
# 4697 : 14 : CoinsTip().ReallocateCache();
# 4698 : 14 : }
# 4699 : 23 : return ret;
# 4700 : 23 : }
# 4701 : :
# 4702 : : //! Guess how far we are in the verification process at the given block index
# 4703 : : //! require cs_main if pindex has not been validated yet (because nChainTx might be unset)
# 4704 : 204271 : double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
# 4705 [ + + ]: 204271 : if (pindex == nullptr)
# 4706 : 1 : return 0.0;
# 4707 : :
# 4708 : 204270 : int64_t nNow = time(nullptr);
# 4709 : :
# 4710 : 204270 : double fTxTotal;
# 4711 : :
# 4712 [ + + ]: 204270 : if (pindex->nChainTx <= data.nTxCount) {
# 4713 : 351 : fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
# 4714 : 203919 : } else {
# 4715 : 203919 : fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
# 4716 : 203919 : }
# 4717 : :
# 4718 : 204270 : return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
# 4719 : 204271 : }
# 4720 : :
# 4721 : : std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
# 4722 : 15 : {
# 4723 : 15 : LOCK(::cs_main);
# 4724 [ + + ][ + + ]: 15 : if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
# 4725 : : // If a snapshot chainstate exists, it will always be our active.
# 4726 : 4 : return m_active_chainstate->m_from_snapshot_blockhash;
# 4727 : 4 : }
# 4728 : 11 : return std::nullopt;
# 4729 : 15 : }
# 4730 : :
# 4731 : : std::vector<CChainState*> ChainstateManager::GetAll()
# 4732 : 78678 : {
# 4733 : 78678 : LOCK(::cs_main);
# 4734 : 78678 : std::vector<CChainState*> out;
# 4735 : :
# 4736 [ + - ][ + - ]: 78678 : if (!IsSnapshotValidated() && m_ibd_chainstate) {
# 4737 : 78678 : out.push_back(m_ibd_chainstate.get());
# 4738 : 78678 : }
# 4739 : :
# 4740 [ + + ]: 78678 : if (m_snapshot_chainstate) {
# 4741 : 108 : out.push_back(m_snapshot_chainstate.get());
# 4742 : 108 : }
# 4743 : :
# 4744 : 78678 : return out;
# 4745 : 78678 : }
# 4746 : :
# 4747 : : CChainState& ChainstateManager::InitializeChainstate(
# 4748 : : CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
# 4749 : 969 : {
# 4750 : 969 : AssertLockHeld(::cs_main);
# 4751 : 969 : bool is_snapshot = snapshot_blockhash.has_value();
# 4752 : 969 : std::unique_ptr<CChainState>& to_modify =
# 4753 [ + + ]: 969 : is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
# 4754 : :
# 4755 [ - + ]: 969 : if (to_modify) {
# 4756 : 0 : throw std::logic_error("should not be overwriting a chainstate");
# 4757 : 0 : }
# 4758 : 969 : to_modify.reset(new CChainState(mempool, m_blockman, *this, snapshot_blockhash));
# 4759 : :
# 4760 : : // Snapshot chainstates and initial IBD chaintates always become active.
# 4761 [ + + ][ + - ]: 969 : if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
# [ + - ]
# 4762 : 969 : LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
# 4763 : 969 : m_active_chainstate = to_modify.get();
# 4764 : 969 : } else {
# 4765 : 0 : throw std::logic_error("unexpected chainstate activation");
# 4766 : 0 : }
# 4767 : :
# 4768 : 969 : return *to_modify;
# 4769 : 969 : }
# 4770 : :
# 4771 : : const AssumeutxoData* ExpectedAssumeutxo(
# 4772 : : const int height, const CChainParams& chainparams)
# 4773 : 15 : {
# 4774 : 15 : const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
# 4775 : 15 : const auto assumeutxo_found = valid_assumeutxos_map.find(height);
# 4776 : :
# 4777 [ + + ]: 15 : if (assumeutxo_found != valid_assumeutxos_map.end()) {
# 4778 : 8 : return &assumeutxo_found->second;
# 4779 : 8 : }
# 4780 : 7 : return nullptr;
# 4781 : 15 : }
# 4782 : :
//! Load a UTXO snapshot into a freshly constructed chainstate and, if it
//! deserializes and validates successfully, make that chainstate active.
//! Returns false (after restoring cache allocations) on any failure.
bool ChainstateManager::ActivateSnapshot(
        AutoFile& coins_file,
        const SnapshotMetadata& metadata,
        bool in_memory)
{
    uint256 base_blockhash = metadata.m_base_blockhash;

    // Only one snapshot activation is permitted per process lifetime.
    if (this->SnapshotBlockhash()) {
        LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
        return false;
    }

    int64_t current_coinsdb_cache_size{0};
    int64_t current_coinstip_cache_size{0};

    // Cache percentages to allocate to each chainstate.
    //
    // These particular percentages don't matter so much since they will only be
    // relevant during snapshot activation; caches are rebalanced at the conclusion of
    // this function. We want to give (essentially) all available cache capacity to the
    // snapshot to aid the bulk load later in this function.
    static constexpr double IBD_CACHE_PERC = 0.01;
    static constexpr double SNAPSHOT_CACHE_PERC = 0.99;

    {
        LOCK(::cs_main);
        // Resize the coins caches to ensure we're not exceeding memory limits.
        //
        // Allocate the majority of the cache to the incoming snapshot chainstate, since
        // (optimistically) getting to its tip will be the top priority. We'll need to call
        // `MaybeRebalanceCaches()` once we're done with this function to ensure
        // the right allocation (including the possibility that no snapshot was activated
        // and that we should restore the active chainstate caches to their original size).
        //
        current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
        current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;

        // Temporarily resize the active coins cache to make room for the newly-created
        // snapshot chain.
        this->ActiveChainstate().ResizeCoinsCaches(
            static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
            static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
    }

    // The new chainstate is constructed under cs_main but is not yet
    // reachable by any other code, so subsequent access needs no lock.
    auto snapshot_chainstate = WITH_LOCK(::cs_main,
        return std::make_unique<CChainState>(
            /*mempool=*/nullptr, m_blockman, *this, base_blockhash));

    {
        LOCK(::cs_main);
        snapshot_chainstate->InitCoinsDB(
            static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
            in_memory, false, "chainstate");
        snapshot_chainstate->InitCoinsCache(
            static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
    }

    const bool snapshot_ok = this->PopulateAndValidateSnapshot(
        *snapshot_chainstate, coins_file, metadata);

    if (!snapshot_ok) {
        // Deserialization or validation failed; undo the temporary cache
        // reallocation before reporting failure to the caller.
        WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
        return false;
    }

    {
        LOCK(::cs_main);
        assert(!m_snapshot_chainstate);
        // Take ownership of the validated chainstate and make it active.
        m_snapshot_chainstate.swap(snapshot_chainstate);
        const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
        assert(chaintip_loaded);

        m_active_chainstate = m_snapshot_chainstate.get();

        LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
        LogPrintf("[snapshot] (%.2f MB)\n",
            m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000));

        this->MaybeRebalanceCaches();
    }
    return true;
}
# 4865 : :
//! Flush the snapshot's coins cache to disk, timing the operation via the
//! scoped log-timer macro. The message distinguishes the final post-load save
//! from the intermediate "cache too large" flushes.
static void FlushSnapshotToDisk(CCoinsViewCache& coins_cache, bool snapshot_loaded)
{
    LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE(
        strprintf("%s (%.2f MB)",
                  snapshot_loaded ? "saving snapshot chainstate" : "flushing coins cache",
                  coins_cache.DynamicMemoryUsage() / (1000 * 1000)),
        BCLog::LogFlags::ALL);

    coins_cache.Flush();
}
# 4876 : :
//! Deserialize the UTXO set from `coins_file` into `snapshot_chainstate`'s
//! coins cache, verify its serialized hash against the hard-coded assumeutxo
//! value for the base block's height, and fake the minimal CBlockIndex state
//! needed for the snapshot chain to function. Returns false on any
//! deserialization or validation failure.
bool ChainstateManager::PopulateAndValidateSnapshot(
    CChainState& snapshot_chainstate,
    AutoFile& coins_file,
    const SnapshotMetadata& metadata)
{
    // It's okay to release cs_main before we're done using `coins_cache` because we know
    // that nothing else will be referencing the newly created snapshot_chainstate yet.
    CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());

    uint256 base_blockhash = metadata.m_base_blockhash;

    CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));

    if (!snapshot_start_block) {
        // Needed for ComputeUTXOStats and ExpectedAssumeutxo to determine the
        // height and to avoid a crash when base_blockhash.IsNull()
        LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
                  base_blockhash.ToString());
        return false;
    }

    int base_height = snapshot_start_block->nHeight;
    auto maybe_au_data = ExpectedAssumeutxo(base_height, GetParams());

    if (!maybe_au_data) {
        LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */
                  "(%d) - refusing to load snapshot\n", base_height);
        return false;
    }

    const AssumeutxoData& au_data = *maybe_au_data;

    COutPoint outpoint;
    Coin coin;
    const uint64_t coins_count = metadata.m_coins_count;
    uint64_t coins_left = metadata.m_coins_count;

    LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString());
    int64_t coins_processed{0};

    // Deserialize exactly `coins_count` (outpoint, coin) pairs into the cache.
    while (coins_left > 0) {
        try {
            coins_file >> outpoint;
            coins_file >> coin;
        } catch (const std::ios_base::failure&) {
            LogPrintf("[snapshot] bad snapshot format or truncated snapshot after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }
        // Reject coins that claim to be created above the snapshot base, and
        // outpoint indices at the type's max value.
        if (coin.nHeight > base_height ||
            outpoint.n >= std::numeric_limits<decltype(outpoint.n)>::max() // Avoid integer wrap-around in coinstats.cpp:ApplyHash
        ) {
            LogPrintf("[snapshot] bad snapshot data after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }

        coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin));

        --coins_left;
        ++coins_processed;

        // Progress log every million coins.
        if (coins_processed % 1000000 == 0) {
            LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
                coins_processed,
                static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count),
                coins_cache.DynamicMemoryUsage() / (1000 * 1000));
        }

        // Batch write and flush (if we need to) every so often.
        //
        // If our average Coin size is roughly 41 bytes, checking every 120,000 coins
        // means <5MB of memory imprecision.
        if (coins_processed % 120000 == 0) {
            if (ShutdownRequested()) {
                return false;
            }

            const auto snapshot_cache_state = WITH_LOCK(::cs_main,
                return snapshot_chainstate.GetCoinsCacheSizeState());

            if (snapshot_cache_state >= CoinsCacheSizeState::CRITICAL) {
                // This is a hack - we don't know what the actual best block is, but that
                // doesn't matter for the purposes of flushing the cache here. We'll set this
                // to its correct value (`base_blockhash`) below after the coins are loaded.
                coins_cache.SetBestBlock(GetRandHash());

                // No need to acquire cs_main since this chainstate isn't being used yet.
                FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/false);
            }
        }
    }

    // Important that we set this. This and the coins_cache accesses above are
    // sort of a layer violation, but either we reach into the innards of
    // CCoinsViewCache here or we have to invert some of the CChainState to
    // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
    // method.
    coins_cache.SetBestBlock(base_blockhash);

    // The file must now be exhausted: a further successful read means the
    // snapshot contained more coins than its metadata declared.
    bool out_of_coins{false};
    try {
        coins_file >> outpoint;
    } catch (const std::ios_base::failure&) {
        // We expect an exception since we should be out of coins.
        out_of_coins = true;
    }
    if (!out_of_coins) {
        LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n",
            coins_count);
        return false;
    }

    LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
        coins_count,
        coins_cache.DynamicMemoryUsage() / (1000 * 1000),
        base_blockhash.ToString());

    // No need to acquire cs_main since this chainstate isn't being used yet.
    FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/true);

    assert(coins_cache.GetBestBlock() == base_blockhash);

    auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ };

    // As above, okay to immediately release cs_main here since no other context knows
    // about the snapshot_chainstate.
    CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());

    const std::optional<CCoinsStats> maybe_stats = ComputeUTXOStats(CoinStatsHashType::HASH_SERIALIZED, snapshot_coinsdb, m_blockman, breakpoint_fnc);
    if (!maybe_stats.has_value()) {
        LogPrintf("[snapshot] failed to generate coins stats\n");
        return false;
    }

    // Assert that the deserialized chainstate contents match the expected assumeutxo value.
    if (AssumeutxoHash{maybe_stats->hashSerialized} != au_data.hash_serialized) {
        LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
            au_data.hash_serialized.ToString(), maybe_stats->hashSerialized.ToString());
        return false;
    }

    snapshot_chainstate.m_chain.SetTip(*snapshot_start_block);

    // The remainder of this function requires modifying data protected by cs_main.
    LOCK(::cs_main);

    // Fake various pieces of CBlockIndex state:
    CBlockIndex* index = nullptr;

    // Don't make any modifications to the genesis block.
    // This is especially important because we don't want to erroneously
    // apply BLOCK_ASSUMED_VALID to genesis, which would happen if we didn't skip
    // it here (since it apparently isn't BLOCK_VALID_SCRIPTS).
    constexpr int AFTER_GENESIS_START{1};

    for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) {
        index = snapshot_chainstate.m_chain[i];

        // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
        // entries (among other things)
        if (!index->nTx) {
            index->nTx = 1;
        }
        // Fake nChainTx so that GuessVerificationProgress reports accurately
        index->nChainTx = index->pprev->nChainTx + index->nTx;

        // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid.
        if (!index->IsValid(BLOCK_VALID_SCRIPTS)) {
            // This flag will be removed once the block is fully validated by a
            // background chainstate.
            index->nStatus |= BLOCK_ASSUMED_VALID;
        }

        // Fake BLOCK_OPT_WITNESS so that CChainState::NeedsRedownload()
        // won't ask to rewind the entire assumed-valid chain on startup.
        if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) {
            index->nStatus |= BLOCK_OPT_WITNESS;
        }

        m_blockman.m_dirty_blockindex.insert(index);
        // Changes to the block index will be flushed to disk after this call
        // returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is
        // called, since we've added a snapshot chainstate and therefore will
        // have to downsize the IBD chainstate, which will result in a call to
        // `FlushStateToDisk(ALWAYS)`.
    }

    assert(index);
    // The base block gets the authoritative chain-tx count from the
    // hard-coded assumeutxo data, and becomes a block index candidate.
    index->nChainTx = au_data.nChainTx;
    snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);

    LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
        coins_cache.DynamicMemoryUsage() / (1000 * 1000));
    return true;
}
# 5073 : :
# 5074 : : CChainState& ChainstateManager::ActiveChainstate() const
# 5075 : 7313730 : {
# 5076 : 7313730 : LOCK(::cs_main);
# 5077 : 7313730 : assert(m_active_chainstate);
# 5078 : 0 : return *m_active_chainstate;
# 5079 : 7313730 : }
# 5080 : :
# 5081 : : bool ChainstateManager::IsSnapshotActive() const
# 5082 : 90 : {
# 5083 : 90 : LOCK(::cs_main);
# 5084 [ + + ][ + - ]: 90 : return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
# 5085 : 90 : }
# 5086 : :
# 5087 : : void ChainstateManager::MaybeRebalanceCaches()
# 5088 : 10 : {
# 5089 : 10 : AssertLockHeld(::cs_main);
# 5090 [ + - ][ + + ]: 10 : if (m_ibd_chainstate && !m_snapshot_chainstate) {
# 5091 : 7 : LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
# 5092 : : // Allocate everything to the IBD chainstate.
# 5093 : 7 : m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5094 : 7 : }
# 5095 [ + - ][ - + ]: 3 : else if (m_snapshot_chainstate && !m_ibd_chainstate) {
# 5096 : 0 : LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
# 5097 : : // Allocate everything to the snapshot chainstate.
# 5098 : 0 : m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5099 : 0 : }
# 5100 [ + - ][ + - ]: 3 : else if (m_ibd_chainstate && m_snapshot_chainstate) {
# 5101 : : // If both chainstates exist, determine who needs more cache based on IBD status.
# 5102 : : //
# 5103 : : // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
# 5104 [ + + ]: 3 : if (m_snapshot_chainstate->IsInitialBlockDownload()) {
# 5105 : 1 : m_ibd_chainstate->ResizeCoinsCaches(
# 5106 : 1 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5107 : 1 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5108 : 1 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5109 : 2 : } else {
# 5110 : 2 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5111 : 2 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5112 : 2 : m_ibd_chainstate->ResizeCoinsCaches(
# 5113 : 2 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5114 : 2 : }
# 5115 : 3 : }
# 5116 : 10 : }
# 5117 : :
# 5118 : : ChainstateManager::~ChainstateManager()
# 5119 : 968 : {
# 5120 : 968 : LOCK(::cs_main);
# 5121 : :
# 5122 : 968 : m_versionbitscache.Clear();
# 5123 : :
# 5124 : : // TODO: The warning cache should probably become non-global
# 5125 [ + + ]: 28072 : for (auto& i : warningcache) {
# 5126 : 28072 : i.clear();
# 5127 : 28072 : }
# 5128 : 968 : }
|