Branch data Line data Source code
# 1 : : // Copyright (c) 2009-2010 Satoshi Nakamoto
# 2 : : // Copyright (c) 2009-2021 The Bitcoin Core developers
# 3 : : // Distributed under the MIT software license, see the accompanying
# 4 : : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
# 5 : :
# 6 : : #include <validation.h>
# 7 : :
# 8 : : #include <arith_uint256.h>
# 9 : : #include <chain.h>
# 10 : : #include <chainparams.h>
# 11 : : #include <checkqueue.h>
# 12 : : #include <consensus/amount.h>
# 13 : : #include <consensus/consensus.h>
# 14 : : #include <consensus/merkle.h>
# 15 : : #include <consensus/tx_check.h>
# 16 : : #include <consensus/tx_verify.h>
# 17 : : #include <consensus/validation.h>
# 18 : : #include <cuckoocache.h>
# 19 : : #include <deploymentstatus.h>
# 20 : : #include <flatfile.h>
# 21 : : #include <hash.h>
# 22 : : #include <index/blockfilterindex.h>
# 23 : : #include <logging.h>
# 24 : : #include <logging/timer.h>
# 25 : : #include <node/blockstorage.h>
# 26 : : #include <node/coinstats.h>
# 27 : : #include <node/ui_interface.h>
# 28 : : #include <node/utxo_snapshot.h>
# 29 : : #include <policy/policy.h>
# 30 : : #include <policy/rbf.h>
# 31 : : #include <policy/settings.h>
# 32 : : #include <pow.h>
# 33 : : #include <primitives/block.h>
# 34 : : #include <primitives/transaction.h>
# 35 : : #include <random.h>
# 36 : : #include <reverse_iterator.h>
# 37 : : #include <script/script.h>
# 38 : : #include <script/sigcache.h>
# 39 : : #include <shutdown.h>
# 40 : : #include <signet.h>
# 41 : : #include <timedata.h>
# 42 : : #include <tinyformat.h>
# 43 : : #include <txdb.h>
# 44 : : #include <txmempool.h>
# 45 : : #include <uint256.h>
# 46 : : #include <undo.h>
# 47 : : #include <util/check.h> // For NDEBUG compile time check
# 48 : : #include <util/hasher.h>
# 49 : : #include <util/moneystr.h>
# 50 : : #include <util/rbf.h>
# 51 : : #include <util/strencodings.h>
# 52 : : #include <util/system.h>
# 53 : : #include <util/trace.h>
# 54 : : #include <util/translation.h>
# 55 : : #include <validationinterface.h>
# 56 : : #include <warnings.h>
# 57 : :
# 58 : : #include <algorithm>
# 59 : : #include <numeric>
# 60 : : #include <optional>
# 61 : : #include <string>
# 62 : :
# 63 : : #include <boost/algorithm/string/replace.hpp>
# 64 : :
# 65 : : using node::BLOCKFILE_CHUNK_SIZE;
# 66 : : using node::BlockManager;
# 67 : : using node::BlockMap;
# 68 : : using node::CBlockIndexHeightOnlyComparator;
# 69 : : using node::CBlockIndexWorkComparator;
# 70 : : using node::CCoinsStats;
# 71 : : using node::CoinStatsHashType;
# 72 : : using node::fHavePruned;
# 73 : : using node::fImporting;
# 74 : : using node::fPruneMode;
# 75 : : using node::fReindex;
# 76 : : using node::GetUTXOStats;
# 77 : : using node::nPruneTarget;
# 78 : : using node::OpenBlockFile;
# 79 : : using node::ReadBlockFromDisk;
# 80 : : using node::SnapshotMetadata;
# 81 : : using node::UNDOFILE_CHUNK_SIZE;
# 82 : : using node::UndoReadFromDisk;
# 83 : : using node::UnlinkPrunedFiles;
# 84 : :
#define MICRO 0.000001
#define MILLI 0.001

/**
 * An extra transaction can be added to a package, as long as it only has one
 * ancestor and is no larger than this. Not really any reason to make this
 * configurable as it doesn't materially change DoS parameters.
 */
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
/** Maximum age of our tip for us to be considered current for fee estimation */
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
/** Human-readable descriptions of each -checklevel level (index == level). */
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};

/**
 * Mutex to guard access to validation specific variables, such as reading
 * or changing the chainstate.
 *
 * This may also need to be locked when updating the transaction pool, e.g. on
 * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
 *
 * The transaction pool has a separate lock to allow reading from it and the
 * chainstate at the same time.
 */
RecursiveMutex cs_main;

// Best-known header; its height is compared against the active chain tip in
// IsCurrentForFeeEstimation() below. NOTE(review): presumably updated on
// header receipt elsewhere in the file — confirm against the full source.
CBlockIndex *pindexBestHeader = nullptr;
// Mutex/condvar pair plus the hash value used to publish the current best
// block to waiting threads.
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
// NOTE(review): name suggests this toggles multi-threaded script checking;
// not exercised in this chunk — confirm against ConnectBlock.
bool g_parallel_script_checks{false};
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;

// NOTE(review): assumed-valid block hash / minimum chain work thresholds;
// semantics not visible in this chunk — confirm where they are consumed.
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;

// Relay fee floor; used as the minimum feerate in MemPoolAccept::CheckFeeRate.
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
# 137 : :
# 138 : : const CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const
# 139 : 1727 : {
# 140 : 1727 : AssertLockHeld(cs_main);
# 141 : :
# 142 : : // Find the latest block common to locator and chain - we expect that
# 143 : : // locator.vHave is sorted descending by height.
# 144 [ + + ]: 2050 : for (const uint256& hash : locator.vHave) {
# 145 : 2050 : const CBlockIndex* pindex{m_blockman.LookupBlockIndex(hash)};
# 146 [ + + ]: 2050 : if (pindex) {
# 147 [ + + ]: 1745 : if (m_chain.Contains(pindex)) {
# 148 : 1716 : return pindex;
# 149 : 1716 : }
# 150 [ + + ]: 29 : if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
# 151 : 5 : return m_chain.Tip();
# 152 : 5 : }
# 153 : 29 : }
# 154 : 2050 : }
# 155 : 6 : return m_chain.Genesis();
# 156 : 1727 : }
# 157 : :
# 158 : : bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
# 159 : : const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
# 160 : : bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
# 161 : : std::vector<CScriptCheck>* pvChecks = nullptr)
# 162 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 163 : :
// Check whether tx's nLockTime would allow it to be mined in the block
// immediately following active_chain_tip.
bool CheckFinalTxAtTip(const CBlockIndex* active_chain_tip, const CTransaction& tx)
{
    AssertLockHeld(cs_main);
    assert(active_chain_tip); // TODO: Make active_chain_tip a reference

    // Height-based locks are evaluated against the height the transaction
    // would confirm at. IsFinalTx() inside AcceptBlock() uses the height of
    // the block *being* evaluated, so for inclusion in the *next* block we
    // must pass tip height + 1.
    const int block_height{active_chain_tip->nHeight + 1};

    // BIP113: time-based locks compare nLockTime against the median
    // time-past of the previous block. The next block's previous block is
    // the current tip, so use the tip's MTP here.
    const int64_t block_time{active_chain_tip->GetMedianTimePast()};

    return IsFinalTx(tx, block_height, block_time);
}
# 186 : :
/**
 * Check whether tx's BIP68 relative lock-times would be satisfied in the
 * block built on top of `tip`.
 *
 * @param tip                   Current chain tip; the tx is evaluated as if
 *                              mined at height tip->nHeight + 1.
 * @param coins_view            Source for input coins (may include mempool
 *                              coins, which report MEMPOOL_HEIGHT).
 * @param lp                    If non-null and useExistingLockPoints is
 *                              false, updated with freshly calculated
 *                              LockPoints for the current chain.
 * @param useExistingLockPoints If true, trust the heights/times already
 *                              cached in *lp instead of recalculating.
 * @return true if the sequence locks pass; false if they fail or an input
 *         is missing from coins_view.
 */
bool CheckSequenceLocksAtTip(CBlockIndex* tip,
                             const CCoinsView& coins_view,
                             const CTransaction& tx,
                             LockPoints* lp,
                             bool useExistingLockPoints)
{
    assert(tip != nullptr);

    // Temporary index representing the (not yet mined) next block.
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than active_chainstate.m_chain.Height()
    index.nHeight = tip->nHeight + 1;

    // (min required height, min required time) for the tx to be final.
    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!coins_view.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocksAtTip to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (const int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
# 256 : :
# 257 : : // Returns the script flags which should be checked for a given block
# 258 : : static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Consensus::Params& chainparams);
# 259 : :
// Evict expired and lowest-feerate transactions until the mempool fits within
// `limit` bytes, uncaching any coins whose creating tx left the pool with no
// remaining spends.
static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
{
    AssertLockHeld(::cs_main);
    AssertLockHeld(pool.cs);

    // First drop anything older than `age`.
    const int n_expired{pool.Expire(GetTime<std::chrono::seconds>() - age)};
    if (n_expired != 0) {
        LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", n_expired);
    }

    // Then trim by size, collecting outpoints that no longer have spenders.
    std::vector<COutPoint> no_spends_remaining;
    pool.TrimToSize(limit, &no_spends_remaining);
    for (const COutPoint& outpoint : no_spends_remaining) {
        coins_cache.Uncache(outpoint);
    }
}
# 275 : :
// Whether our tip is recent enough (not in IBD, tip not stale, and not more
// than one block behind the best-known header) to feed fee estimation.
static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    if (active_chainstate.IsInitialBlockDownload()) {
        return false;
    }
    const int64_t stale_cutoff{count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)};
    if (active_chainstate.m_chain.Tip()->GetBlockTime() < stale_cutoff) {
        return false;
    }
    return active_chainstate.m_chain.Height() >= pindexBestHeader->nHeight - 1;
}
# 287 : :
/**
 * After a reorg, reconcile the mempool with the new chain: optionally
 * resurrect transactions from disconnected blocks (fAddToMempool), remove
 * transactions that are no longer final/mature on the new chain along with
 * their descendants, and re-apply the mempool size limit.
 *
 * Requires cs_main and m_mempool->cs to be held.
 */
void CChainState::MaybeUpdateMempoolForReorg(
    DisconnectedBlockTransactions& disconnectpool,
    bool fAddToMempool)
{
    if (!m_mempool) return;

    AssertLockHeld(cs_main);
    AssertLockHeld(m_mempool->cs);
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            AcceptToMemoryPool(*this, *it, GetTime(),
                /*bypass_limits=*/true, /*test_accept=*/false).m_result_type !=
                    MempoolAcceptResult::ResultType::VALID) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    const uint64_t ancestor_count_limit = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
    const uint64_t ancestor_size_limit = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
    m_mempool->UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit);

    // Predicate to use for filtering transactions in removeForReorg.
    // Checks whether the transaction is still final and, if it spends a coinbase output, mature.
    // Also updates valid entries' cached LockPoints if needed.
    // If false, the tx is still valid and its lockpoints are updated.
    // If true, the tx would be invalid in the next block; remove this entry and all of its descendants.
    const auto filter_final_and_mature = [this](CTxMemPool::txiter it)
        EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) {
        AssertLockHeld(m_mempool->cs);
        AssertLockHeld(::cs_main);
        const CTransaction& tx = it->GetTx();

        // The transaction must be final.
        if (!CheckFinalTxAtTip(m_chain.Tip(), tx)) return true;
        LockPoints lp = it->GetLockPoints();
        const bool validLP{TestLockPointValidity(m_chain, lp)};
        CCoinsViewMemPool view_mempool(&CoinsTip(), *m_mempool);
        // CheckSequenceLocksAtTip checks if the transaction will be final in the next block to be
        // created on top of the new chain. We use useExistingLockPoints=false so that, instead of
        // using the information in lp (which might now refer to a block that no longer exists in
        // the chain), it will update lp to contain LockPoints relevant to the new chain.
        if (!CheckSequenceLocksAtTip(m_chain.Tip(), view_mempool, tx, &lp, validLP)) {
            // If CheckSequenceLocksAtTip fails, remove the tx and don't depend on the LockPoints.
            return true;
        } else if (!validLP) {
            // If CheckSequenceLocksAtTip succeeded, it also updated the LockPoints.
            // Now update the mempool entry lockpoints as well.
            m_mempool->mapTx.modify(it, [&lp](CTxMemPoolEntry& e) { e.UpdateLockPoints(lp); });
        }

        // If the transaction spends any coinbase outputs, it must be mature.
        if (it->GetSpendsCoinbase()) {
            for (const CTxIn& txin : tx.vin) {
                // Inputs still provided by the mempool cannot be coinbases.
                auto it2 = m_mempool->mapTx.find(txin.prevout.hash);
                if (it2 != m_mempool->mapTx.end())
                    continue;
                const Coin& coin{CoinsTip().AccessCoin(txin.prevout)};
                assert(!coin.IsSpent());
                const auto mempool_spend_height{m_chain.Tip()->nHeight + 1};
                if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) {
                    return true;
                }
            }
        }
        // Transaction is still valid and cached LockPoints are updated.
        return false;
    };

    // We also need to remove any now-immature transactions
    m_mempool->removeForReorg(m_chain, filter_final_and_mature);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(
        *m_mempool,
        this->CoinsTip(),
        gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
        std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
}
# 384 : :
# 385 : : /**
# 386 : : * Checks to avoid mempool polluting consensus critical paths since cached
# 387 : : * signature and script validity results will be reused if we validate this
# 388 : : * transaction again during block validation.
# 389 : : * */
# 390 : : static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state,
# 391 : : const CCoinsViewCache& view, const CTxMemPool& pool,
# 392 : : unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
# 393 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
# 394 : 25176 : {
# 395 : 25176 : AssertLockHeld(cs_main);
# 396 : 25176 : AssertLockHeld(pool.cs);
# 397 : :
# 398 : 25176 : assert(!tx.IsCoinBase());
# 399 [ + + ]: 50985 : for (const CTxIn& txin : tx.vin) {
# 400 : 50985 : const Coin& coin = view.AccessCoin(txin.prevout);
# 401 : :
# 402 : : // This coin was checked in PreChecks and MemPoolAccept
# 403 : : // has been holding cs_main since then.
# 404 : 50985 : Assume(!coin.IsSpent());
# 405 [ - + ]: 50985 : if (coin.IsSpent()) return false;
# 406 : :
# 407 : : // If the Coin is available, there are 2 possibilities:
# 408 : : // it is available in our current ChainstateActive UTXO set,
# 409 : : // or it's a UTXO provided by a transaction in our mempool.
# 410 : : // Ensure the scriptPubKeys in Coins from CoinsView are correct.
# 411 : 50985 : const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
# 412 [ + + ]: 50985 : if (txFrom) {
# 413 : 6878 : assert(txFrom->GetHash() == txin.prevout.hash);
# 414 : 0 : assert(txFrom->vout.size() > txin.prevout.n);
# 415 : 0 : assert(txFrom->vout[txin.prevout.n] == coin.out);
# 416 : 44107 : } else {
# 417 : 44107 : const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
# 418 : 44107 : assert(!coinFromUTXOSet.IsSpent());
# 419 : 0 : assert(coinFromUTXOSet.out == coin.out);
# 420 : 44107 : }
# 421 : 50985 : }
# 422 : :
# 423 : : // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
# 424 : 25176 : return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata);
# 425 : 25176 : }
# 426 : :
# 427 : : namespace {
# 428 : :
# 429 : : class MemPoolAccept
# 430 : : {
# 431 : : public:
    // Bind to the mempool and active chainstate and snapshot the configured
    // ancestor/descendant package limits (sizes converted from kvB to bytes).
    explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
        m_limit_ancestors(gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
        m_limit_descendants(gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
    }
# 438 : :
# 439 : : // We put the arguments we're handed into a struct, so we can pass them
# 440 : : // around easier.
    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        const int64_t m_accept_time;
        // When true, mempool limits (e.g. size/fee floors) are bypassed.
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        // When true, run validation only; never submit to the mempool.
        const bool m_test_accept;
        /** Whether we allow transactions to replace mempool transactions by BIP125 rules. If false,
         * any transaction spending the same inputs as a transaction in the mempool is considered
         * a conflict. */
        const bool m_allow_bip125_replacement;
        /** When true, the mempool will not be trimmed when individual transactions are submitted in
         * Finalize(). Instead, limits should be enforced at the end to ensure the package is not
         * partially submitted.
         */
        const bool m_package_submission;
        /** When true, use package feerates instead of individual transaction feerates for fee-based
         * policies such as mempool min fee and min relay fee.
         */
        const bool m_package_feerates;

        /** Parameters for single transaction mempool validation. */
        static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time,
                                     bool bypass_limits, std::vector<COutPoint>& coins_to_uncache,
                                     bool test_accept) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ bypass_limits,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ test_accept,
                            /* m_allow_bip125_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for test package mempool validation through testmempoolaccept. */
        static ATMPArgs PackageTestAccept(const CChainParams& chainparams, int64_t accept_time,
                                          std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ true,
                            /* m_allow_bip125_replacement */ false,
                            /* m_package_submission */ false, // not submitting to mempool
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for child-with-unconfirmed-parents package validation. */
        static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time,
                                                std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ false,
                            /* m_allow_bip125_replacement */ false,
                            /* m_package_submission */ true,
                            /* m_package_feerates */ true,
            };
        }

        /** Parameters for a single transaction within a package. */
        static ATMPArgs SingleInPackageAccept(const ATMPArgs& package_args) {
            return ATMPArgs{/* m_chainparams */ package_args.m_chainparams,
                            /* m_accept_time */ package_args.m_accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ package_args.m_coins_to_uncache,
                            /* m_test_accept */ package_args.m_test_accept,
                            /* m_allow_bip125_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false, // only 1 transaction
            };
        }

    private:
        // Private ctor to avoid exposing details to clients and allowing the possibility of
        // mixing up the order of the arguments. Use static functions above instead.
        ATMPArgs(const CChainParams& chainparams,
                 int64_t accept_time,
                 bool bypass_limits,
                 std::vector<COutPoint>& coins_to_uncache,
                 bool test_accept,
                 bool allow_bip125_replacement,
                 bool package_submission,
                 bool package_feerates)
            : m_chainparams{chainparams},
              m_accept_time{accept_time},
              m_bypass_limits{bypass_limits},
              m_coins_to_uncache{coins_to_uncache},
              m_test_accept{test_accept},
              m_allow_bip125_replacement{allow_bip125_replacement},
              m_package_submission{package_submission},
              m_package_feerates{package_feerates}
        {
        }
    };
# 546 : :
# 547 : : // Single transaction acceptance
# 548 : : MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 549 : :
# 550 : : /**
# 551 : : * Multiple transaction acceptance. Transactions may or may not be interdependent, but must not
# 552 : : * conflict with each other, and the transactions cannot already be in the mempool. Parents must
# 553 : : * come before children if any dependencies exist.
# 554 : : */
# 555 : : PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 556 : :
# 557 : : /**
# 558 : : * Package (more specific than just multiple transactions) acceptance. Package must be a child
# 559 : : * with all of its unconfirmed parents, and topologically sorted.
# 560 : : */
# 561 : : PackageMempoolAcceptResult AcceptPackage(const Package& package, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
# 562 : :
# 563 : : private:
# 564 : : // All the intermediate state that gets passed between the various levels
# 565 : : // of checking a given transaction.
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        /** Txids of mempool transactions that this transaction directly conflicts with. */
        std::set<uint256> m_conflicts;
        /** Iterators to mempool entries that this transaction directly conflicts with. */
        CTxMemPool::setEntries m_iters_conflicting;
        /** Iterators to all mempool entries that would be replaced by this transaction, including
         * those it directly conflicts with and their descendants. */
        CTxMemPool::setEntries m_all_conflicting;
        /** All mempool ancestors of this transaction. */
        CTxMemPool::setEntries m_ancestors;
        /** Mempool entry constructed for this transaction. Constructed in PreChecks() but not
         * inserted into the mempool until Finalize(). */
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        /** Pointers to the transactions that have been removed from the mempool and replaced by
         * this transaction, used to return to the MemPoolAccept caller. Only populated if
         * validation is successful and the original transactions are removed. */
        std::list<CTransactionRef> m_replaced_transactions;

        /** Virtual size of the transaction as used by the mempool, calculated using serialized size
         * of the transaction and sigops. */
        int64_t m_vsize;
        /** Fees paid by this transaction: total input amounts subtracted by total output amounts. */
        CAmount m_base_fees;
        /** Base fees + any fee delta set by the user with prioritisetransaction. */
        CAmount m_modified_fees;
        /** Total modified fees of all transactions being replaced. */
        CAmount m_conflicting_fees{0};
        /** Total virtual size of all transactions being replaced. */
        size_t m_conflicting_size{0};

        /** The transaction being validated (reference held by the caller). */
        const CTransactionRef& m_ptx;
        /** Txid. */
        const uint256& m_hash;
        /** Validation result/failure reason for this transaction. */
        TxValidationState m_state;
        /** A temporary cache containing serialized transaction data for signature verification.
         * Reused across PolicyScriptChecks and ConsensusScriptChecks. */
        PrecomputedTransactionData m_precomputed_txdata;
    };
# 605 : :
# 606 : : // Run the policy checks on a given transaction, excluding any script checks.
# 607 : : // Looks up inputs, calculates feerate, considers replacement, evaluates
# 608 : : // package limits, etc. As this function can be invoked for "free" by a peer,
# 609 : : // only tests that are fast should be done here (to avoid CPU DoS).
# 610 : : bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 611 : :
# 612 : : // Run checks for mempool replace-by-fee.
# 613 : : bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 614 : :
# 615 : : // Enforce package mempool ancestor/descendant limits (distinct from individual
# 616 : : // ancestor/descendant limits done in PreChecks).
# 617 : : bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
# 618 : : PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 619 : :
# 620 : : // Run the script checks using our policy flags. As this can be slow, we should
# 621 : : // only invoke this on transactions that have otherwise passed policy checks.
# 622 : : bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 623 : :
# 624 : : // Re-run the script checks, using consensus flags, and try to cache the
# 625 : : // result in the scriptcache. This should be done after
# 626 : : // PolicyScriptChecks(). This requires that all inputs either be in our
# 627 : : // utxo set or in the mempool.
# 628 : : bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 629 : :
# 630 : : // Try to add the transaction to the mempool, removing any conflicts first.
# 631 : : // Returns true if the transaction is in the mempool after any size
# 632 : : // limiting is performed, false otherwise.
# 633 : : bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 634 : :
# 635 : : // Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script
# 636 : : // cache - should only be called after successful validation of all transactions in the package.
# 637 : : // The package may end up partially-submitted after size limiting; returns true if all
# 638 : : // transactions are successfully added to the mempool, false otherwise.
# 639 : : bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
# 640 : : std::map<const uint256, const MempoolAcceptResult>& results)
# 641 : : EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
# 642 : :
# 643 : : // Compare a package's feerate against minimum allowed.
# : : //
# : : // @param[in] package_size total virtual size (vbytes) of the transaction(s) being checked
# : : // @param[in] package_fee total (modified) fees of the transaction(s)
# : : // @param[out] state filled with TX_MEMPOOL_POLICY and a reject reason on failure
# : : // Returns false if the fee is below either the dynamic mempool minimum feerate
# : : // (which rises as the mempool fills toward -maxmempool) or the static minimum
# : : // relay feerate (::minRelayTxFee).
# 644 : : bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs)
# 645 : 31829 : {
# 646 : 31829 : AssertLockHeld(::cs_main);
# 647 : 31829 : AssertLockHeld(m_pool.cs);
# : : // Dynamic floor: GetMinFee() scales with how full the mempool is relative
# : : // to the -maxmempool byte limit (argument is in bytes, hence * 1000000).
# 648 : 31829 : CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
# 649 [ + + ][ + + ]: 31829 : if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
# 650 : 11 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
# 651 : 11 : }
# 652 : :
# : : // Static floor: the configured minimum relay feerate.
# 653 [ + + ]: 31818 : if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
# 654 : 4067 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
# 655 : 4067 : }
# 656 : 27751 : return true;
# 657 : 31818 : }
# 658 : :
# 659 : : private:
# 660 : : CTxMemPool& m_pool;
# 661 : : CCoinsViewCache m_view;
# 662 : : CCoinsViewMemPool m_viewmempool;
# 663 : : CCoinsView m_dummy;
# 664 : :
# 665 : : CChainState& m_active_chainstate;
# 666 : :
# 667 : : // The package limits in effect at the time of invocation.
# 668 : : const size_t m_limit_ancestors;
# 669 : : const size_t m_limit_ancestor_size;
# 670 : : // These may be modified while evaluating a transaction (eg to account for
# 671 : : // in-mempool conflicts; see below).
# 672 : : size_t m_limit_descendants;
# 673 : : size_t m_limit_descendant_size;
# 674 : :
# 675 : : /** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. */
# 676 : : bool m_rbf{false};
# 677 : : };
# 678 : :
# : : // Fast, non-script policy checks for a single transaction. On failure the
# : : // reject reason is recorded in ws.m_state; on success ws (fees, vsize, entry,
# : : // ancestors, conflicts) is populated for the later validation stages.
# 679 : : bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
# 680 : 38143 : {
# 681 : 38143 : AssertLockHeld(cs_main);
# 682 : 38143 : AssertLockHeld(m_pool.cs);
# 683 : 38143 : const CTransactionRef& ptx = ws.m_ptx;
# 684 : 38143 : const CTransaction& tx = *ws.m_ptx;
# 685 : 38143 : const uint256& hash = ws.m_hash;
# 686 : :
# 687 : : // Copy/alias what we need out of args
# 688 : 38143 : const int64_t nAcceptTime = args.m_accept_time;
# 689 : 38143 : const bool bypass_limits = args.m_bypass_limits;
# 690 : 38143 : std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
# 691 : :
# 692 : : // Alias what we need out of ws
# 693 : 38143 : TxValidationState& state = ws.m_state;
# 694 : 38143 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
# 695 : :
# : : // Context-free consensus sanity checks (sizes, duplicate inputs, amounts).
# 696 [ + + ]: 38143 : if (!CheckTransaction(tx, state)) {
# 697 : 14 : return false; // state filled in by CheckTransaction
# 698 : 14 : }
# 699 : :
# 700 : : // Coinbase is only valid in a block, not as a loose transaction
# 701 [ + + ]: 38129 : if (tx.IsCoinBase())
# 702 : 3 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
# 703 : :
# 704 : : // Rather not work on nonstandard transactions (unless -testnet/-regtest)
# 705 : 38126 : std::string reason;
# 706 [ + + ][ + + ]: 38126 : if (fRequireStandard && !IsStandardTx(tx, reason))
# 707 : 1536 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
# 708 : :
# 709 : : // Do not work on transactions that are too small.
# 710 : : // A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
# 711 : : // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
# 712 : : // 64-byte transactions.
# 713 [ + + ]: 36590 : if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
# 714 : 6 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
# 715 : :
# 716 : : // Only accept nLockTime-using transactions that can be mined in the next
# 717 : : // block; we don't want our mempool filled up with transactions that can't
# 718 : : // be mined yet.
# 719 [ + + ]: 36584 : if (!CheckFinalTxAtTip(m_active_chainstate.m_chain.Tip(), tx)) {
# 720 : 69 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
# 721 : 69 : }
# 722 : :
# 723 [ + + ]: 36515 : if (m_pool.exists(GenTxid::Wtxid(tx.GetWitnessHash()))) {
# 724 : : // Exact transaction already exists in the mempool.
# 725 : 4 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
# 726 [ + + ]: 36511 : } else if (m_pool.exists(GenTxid::Txid(tx.GetHash()))) {
# 727 : : // Transaction with the same non-witness data but different witness (same txid, different
# 728 : : // wtxid) already exists in the mempool.
# 729 : 1 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
# 730 : 1 : }
# 731 : :
# 732 : : // Check for conflicts with in-memory transactions
# 733 [ + + ]: 36510 : for (const CTxIn &txin : tx.vin)
# 734 : 71172 : {
# 735 : 71172 : const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
# 736 [ + + ]: 71172 : if (ptxConflicting) {
# 737 [ + + ]: 1884 : if (!args.m_allow_bip125_replacement) {
# 738 : : // Transaction conflicts with a mempool tx, but we're not allowing replacements.
# 739 : 1 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
# 740 : 1 : }
# 741 [ + + ]: 1883 : if (!ws.m_conflicts.count(ptxConflicting->GetHash()))
# 742 : 1875 : {
# 743 : : // Transactions that don't explicitly signal replaceability are
# 744 : : // *not* replaceable with the current logic, even if one of their
# 745 : : // unconfirmed ancestors signals replaceability. This diverges
# 746 : : // from BIP125's inherited signaling description (see CVE-2021-31876).
# 747 : : // Applications relying on first-seen mempool behavior should
# 748 : : // check all unconfirmed ancestors; otherwise an opt-in ancestor
# 749 : : // might be replaced, causing removal of this descendant.
# 750 [ + + ]: 1875 : if (!SignalsOptInRBF(*ptxConflicting)) {
# 751 : 10 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
# 752 : 10 : }
# 753 : :
# 754 : 1865 : ws.m_conflicts.insert(ptxConflicting->GetHash());
# 755 : 1865 : }
# 756 : 1883 : }
# 757 : 71172 : }
# 758 : :
# 759 : 36499 : LockPoints lp;
# : : // Temporarily layer the mempool's coins view under m_view so unconfirmed
# : : // parents' outputs are visible while we fetch this tx's inputs.
# 760 : 36499 : m_view.SetBackend(m_viewmempool);
# 761 : :
# 762 : 36499 : const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
# 763 : : // do all inputs exist?
# 764 [ + + ]: 67818 : for (const CTxIn& txin : tx.vin) {
# 765 [ + + ]: 67818 : if (!coins_cache.HaveCoinInCache(txin.prevout)) {
# 766 : 15099 : coins_to_uncache.push_back(txin.prevout);
# 767 : 15099 : }
# 768 : :
# 769 : : // Note: this call may add txin.prevout to the coins cache
# 770 : : // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
# 771 : : // later (via coins_to_uncache) if this tx turns out to be invalid.
# 772 [ + + ]: 67818 : if (!m_view.HaveCoin(txin.prevout)) {
# 773 : : // Are inputs missing because we already have the tx?
# 774 [ + + ]: 7231 : for (size_t out = 0; out < tx.vout.size(); out++) {
# 775 : : // Optimistically just do efficient check of cache for outputs
# 776 [ + + ]: 3630 : if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
# 777 : 9 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
# 778 : 9 : }
# 779 : 3630 : }
# 780 : : // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
# 781 : 3601 : return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
# 782 : 3610 : }
# 783 : 67818 : }
# 784 : :
# 785 : : // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
# 786 : : // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
# 787 : 32889 : m_view.GetBestBlock();
# 788 : :
# 789 : : // we have all inputs cached now, so switch back to dummy (to protect
# 790 : : // against bugs where we pull more inputs from disk that miss being added
# 791 : : // to coins_to_uncache)
# 792 : 32889 : m_view.SetBackend(m_dummy);
# 793 : :
# 794 : 32889 : assert(m_active_chainstate.m_blockman.LookupBlockIndex(m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
# 795 : :
# 796 : : // Only accept BIP68 sequence locked transactions that can be mined in the next
# 797 : : // block; we don't want our mempool filled up with transactions that can't
# 798 : : // be mined yet.
# 799 : : // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
# 800 : : // backend was removed, it no longer pulls coins from the mempool.
# 801 [ + + ]: 32889 : if (!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx, &lp)) {
# 802 : 369 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
# 803 : 369 : }
# 804 : :
# 805 : : // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs
# 806 [ + + ]: 32520 : if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_chain.Height() + 1, ws.m_base_fees)) {
# 807 : 4 : return false; // state filled in by CheckTxInputs
# 808 : 4 : }
# 809 : :
# 810 : : // Check for non-standard pay-to-script-hash in inputs
# 811 [ + + ][ + + ]: 32516 : if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
# 812 : 199 : return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
# 813 : 199 : }
# 814 : :
# 815 : : // Check for non-standard witnesses.
# 816 [ + + ][ + + ]: 32317 : if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
# [ + + ]
# 817 : 142 : return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
# 818 : :
# 819 : 32175 : int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
# 820 : :
# 821 : : // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
# 822 : 32175 : ws.m_modified_fees = ws.m_base_fees;
# 823 : 32175 : m_pool.ApplyDelta(hash, ws.m_modified_fees);
# 824 : :
# 825 : : // Keep track of transactions that spend a coinbase, which we re-scan
# 826 : : // during reorgs to ensure COINBASE_MATURITY is still met.
# 827 : 32175 : bool fSpendsCoinbase = false;
# 828 [ + + ]: 57016 : for (const CTxIn &txin : tx.vin) {
# 829 : 57016 : const Coin &coin = m_view.AccessCoin(txin.prevout);
# 830 [ + + ]: 57016 : if (coin.IsCoinBase()) {
# 831 : 3793 : fSpendsCoinbase = true;
# 832 : 3793 : break;
# 833 : 3793 : }
# 834 : 57016 : }
# 835 : :
# 836 : 32175 : entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
# 837 : 32175 : fSpendsCoinbase, nSigOpsCost, lp));
# 838 : 32175 : ws.m_vsize = entry->GetTxSize();
# 839 : :
# 840 [ + + ]: 32175 : if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
# 841 : 4 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
# 842 : 4 : strprintf("%d", nSigOpsCost));
# 843 : :
# 844 : : // No individual transactions are allowed below minRelayTxFee and mempool min fee except from
# 845 : : // disconnected blocks and transactions in a package. Package transactions will be checked using
# 846 : : // package feerate later.
# 847 [ + + ][ + + ]: 32171 : if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false;
# [ + + ]
# 848 : :
# 849 : 28099 : ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
# 850 : : // Calculate in-mempool ancestors, up to a limit.
# 851 [ + + ]: 28099 : if (ws.m_conflicts.size() == 1) {
# 852 : : // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
# 853 : : // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
# 854 : : // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
# 855 : : // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
# 856 : : // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
# 857 : : // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
# 858 : : // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
# 859 : : // for off-chain contract systems (see link in the comment below).
# 860 : : //
# 861 : : // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
# 862 : : // conflict directly with exactly one other transaction (but may evict children of said transaction),
# 863 : : // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
# 864 : : // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
# 865 : : // amended, we may need to move that check to here instead of removing it wholesale.
# 866 : : //
# 867 : : // Such transactions are clearly not merging any existing packages, so we are only concerned with
# 868 : : // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
# 869 : : // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
# 870 : : // to.
# 871 : : //
# 872 : : // To check these we first check if we meet the RBF criteria, above, and increment the descendant
# 873 : : // limits by the direct conflict and its descendants (as these are recalculated in
# 874 : : // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
# 875 : : // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
# 876 : : // the ancestor limits should be the same for both our new transaction and any conflicts).
# 877 : : // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
# 878 : : // into force here (as we're only adding a single transaction).
# 879 : 1463 : assert(ws.m_iters_conflicting.size() == 1);
# 880 : 0 : CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
# 881 : :
# 882 : 1463 : m_limit_descendants += 1;
# 883 : 1463 : m_limit_descendant_size += conflict->GetSizeWithDescendants();
# 884 : 1463 : }
# 885 : :
# 886 : 0 : std::string errString;
# 887 [ + + ]: 28099 : if (!m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
# 888 : 71 : ws.m_ancestors.clear();
# 889 : : // If CalculateMemPoolAncestors fails second time, we want the original error string.
# 890 : 71 : std::string dummy_err_string;
# 891 : : // Contracting/payment channels CPFP carve-out:
# 892 : : // If the new transaction is relatively small (up to 40k weight)
# 893 : : // and has at most one ancestor (ie ancestor limit of 2, including
# 894 : : // the new transaction), allow it if its parent has exactly the
# 895 : : // descendant limit descendants.
# 896 : : //
# 897 : : // This allows protocols which rely on distrusting counterparties
# 898 : : // being able to broadcast descendants of an unconfirmed transaction
# 899 : : // to be secure by simply only having two immediately-spendable
# 900 : : // outputs - one for each counterparty. For more info on the uses for
# 901 : : // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
# 902 [ + + ]: 71 : if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
# 903 [ + + ]: 71 : !m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
# 904 : 65 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
# 905 : 65 : }
# 906 : 71 : }
# 907 : :
# 908 : : // A transaction that spends outputs that would be replaced by it is invalid. Now
# 909 : : // that we have the set of all ancestors we can detect this
# 910 : : // pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
# 911 : : // intersect.
# 912 [ + + ]: 28034 : if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) {
# 913 : : // We classify this as a consensus error because a transaction depending on something it
# 914 : : // conflicts with would be inconsistent.
# 915 : 4 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
# 916 : 4 : }
# 917 : :
# : : // Record whether this acceptance replaces mempool transactions; the caller
# : : // runs ReplacementChecks() when this is set.
# 918 : 28030 : m_rbf = !ws.m_conflicts.empty();
# 919 : 28030 : return true;
# 920 : 28034 : }
# 921 : :
# : : // Enforce the replace-by-fee rules against the conflicts collected in
# : : // PreChecks. Fills ws.m_all_conflicting, ws.m_conflicting_fees and
# : : // ws.m_conflicting_size; on failure the reject reason is in ws.m_state.
# 922 : : bool MemPoolAccept::ReplacementChecks(Workspace& ws)
# 923 : 1463 : {
# 924 : 1463 : AssertLockHeld(cs_main);
# 925 : 1463 : AssertLockHeld(m_pool.cs);
# 926 : :
# 927 : 1463 : const CTransaction& tx = *ws.m_ptx;
# 928 : 1463 : const uint256& hash = ws.m_hash;
# 929 : 1463 : TxValidationState& state = ws.m_state;
# 930 : :
# : : // Feerate of the replacement, using modified fees (includes prioritisation deltas).
# 931 : 1463 : CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
# 932 : : // The replacement transaction must have a higher feerate than its direct conflicts.
# 933 : : // - The motivation for this check is to ensure that the replacement transaction is preferable for
# 934 : : // block-inclusion, compared to what would be removed from the mempool.
# 935 : : // - This logic predates ancestor feerate-based transaction selection, which is why it doesn't
# 936 : : // consider feerates of descendants.
# 937 : : // - Note: Ancestor feerate-based transaction selection has made this comparison insufficient to
# 938 : : // guarantee that this is incentive-compatible for miners, because it is possible for a
# 939 : : // descendant transaction of a direct conflict to pay a higher feerate than the transaction that
# 940 : : // might replace them, under these rules.
# 941 [ + + ]: 1463 : if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
# 942 : 12 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
# 943 : 12 : }
# 944 : :
# 945 : : // Calculate all conflicting entries and enforce BIP125 Rule #5.
# 946 [ + + ]: 1451 : if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
# 947 : 6 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
# 948 : 6 : "too many potential replacements", *err_string);
# 949 : 6 : }
# 950 : : // Enforce BIP125 Rule #2.
# 951 [ + + ]: 1445 : if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
# 952 : 2 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
# 953 : 2 : "replacement-adds-unconfirmed", *err_string);
# 954 : 2 : }
# 955 : : // Check if it's economically rational to mine this transaction rather than the ones it
# 956 : : // replaces and pays for its own relay fees. Enforce BIP125 Rules #3 and #4.
# 957 [ + + ]: 2207 : for (CTxMemPool::txiter it : ws.m_all_conflicting) {
# 958 : 2207 : ws.m_conflicting_fees += it->GetModifiedFee();
# 959 : 2207 : ws.m_conflicting_size += it->GetTxSize();
# 960 : 2207 : }
# 961 [ + + ]: 1443 : if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
# 962 : 1443 : ::incrementalRelayFee, hash)}) {
# 963 : 6 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
# 964 : 6 : }
# 965 : 1437 : return true;
# 966 : 1443 : }
# 967 : :
# : : // Enforce the package-wide ancestor/descendant limits for a set of
# : : // transactions being considered together; per-tx limits are handled in
# : : // PreChecks. On failure the reason is recorded in package_state.
# 968 : : bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
# 969 : : PackageValidationState& package_state)
# 970 : 64 : {
# 971 : 64 : AssertLockHeld(cs_main);
# 972 : 64 : AssertLockHeld(m_pool.cs);
# 973 : :
# 974 : : // CheckPackageLimits expects the package transactions to not already be in the mempool.
# 975 : 64 : assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto& tx)
# 976 : 64 : { return !m_pool.exists(GenTxid::Txid(tx->GetHash()));}));
# 977 : :
# 978 : 0 : std::string err_string;
# 979 [ + + ]: 64 : if (!m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants,
# 980 : 64 : m_limit_descendant_size, err_string)) {
# 981 : : // This is a package-wide error, separate from an individual transaction error.
# 982 : 10 : return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
# 983 : 10 : }
# 984 : 54 : return true;
# 985 : 64 : }
# 986 : :
# : : // Verify input scripts/signatures under STANDARD (policy) flags. Run after
# : : // all cheaper checks to limit CPU-exhaustion DoS. On failure ws.m_state is
# : : // filled; a stripped witness is specially classified as TX_WITNESS_STRIPPED.
# 987 : : bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
# 988 : 27933 : {
# 989 : 27933 : AssertLockHeld(cs_main);
# 990 : 27933 : AssertLockHeld(m_pool.cs);
# 991 : 27933 : const CTransaction& tx = *ws.m_ptx;
# 992 : 27933 : TxValidationState& state = ws.m_state;
# 993 : :
# 994 : 27933 : constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
# 995 : :
# 996 : : // Check input scripts and signatures.
# 997 : : // This is done last to help prevent CPU exhaustion denial-of-service attacks.
# 998 [ + + ]: 27933 : if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata)) {
# 999 : : // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
# 1000 : : // need to turn both off, and compare against just turning off CLEANSTACK
# 1001 : : // to see if the failure is specifically due to witness validation.
# 1002 : 2264 : TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
# 1003 [ + + ][ + + ]: 2264 : if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata) &&
# 1004 [ + - ]: 2264 : !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata)) {
# 1005 : : // Only the witness is missing, so the transaction itself may be fine.
# 1006 : 17 : state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
# 1007 : 17 : state.GetRejectReason(), state.GetDebugMessage());
# 1008 : 17 : }
# 1009 : 2264 : return false; // state filled in by CheckInputScripts
# 1010 : 2264 : }
# 1011 : :
# 1012 : 25669 : return true;
# 1013 : 27933 : }
# 1014 : :
# : : // Re-verify input scripts under the current tip's consensus flags and warm
# : : // the script cache. A failure here after PolicyScriptChecks passed indicates
# : : // a bug (standard flags should be a superset), hence the Assume(false).
# 1015 : : bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws)
# 1016 : 25176 : {
# 1017 : 25176 : AssertLockHeld(cs_main);
# 1018 : 25176 : AssertLockHeld(m_pool.cs);
# 1019 : 25176 : const CTransaction& tx = *ws.m_ptx;
# 1020 : 25176 : const uint256& hash = ws.m_hash;
# 1021 : 25176 : TxValidationState& state = ws.m_state;
# 1022 : 25176 : const CChainParams& chainparams = args.m_chainparams;
# 1023 : :
# 1024 : : // Check again against the current block tip's script verification
# 1025 : : // flags to cache our script execution flags. This is, of course,
# 1026 : : // useless if the next block has different script flags from the
# 1027 : : // previous one, but because the cache tracks script flags for us it
# 1028 : : // will auto-invalidate and we'll just have a few blocks of extra
# 1029 : : // misses on soft-fork activation.
# 1030 : : //
# 1031 : : // This is also useful in case of bugs in the standard flags that cause
# 1032 : : // transactions to pass as valid when they're actually invalid. For
# 1033 : : // instance the STRICTENC flag was incorrectly allowing certain
# 1034 : : // CHECKSIG NOT scripts to pass, even though they were invalid.
# 1035 : : //
# 1036 : : // There is a similar check in CreateNewBlock() to prevent creating
# 1037 : : // invalid blocks (using TestBlockValidity), however allowing such
# 1038 : : // transactions into the mempool can be exploited as a DoS attack.
# 1039 : 25176 : unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus())};
# 1040 [ - + ]: 25176 : if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags,
# 1041 : 25176 : ws.m_precomputed_txdata, m_active_chainstate.CoinsTip())) {
# 1042 : 0 : LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString());
# 1043 : 0 : return Assume(false);
# 1044 : 0 : }
# 1045 : :
# 1046 : 25176 : return true;
# 1047 : 25176 : }
# 1048 : :
# : : // Evict replaced conflicts, insert the validated entry into the mempool, and
# : : // (outside package submission) trim the mempool. Returns false only when
# : : // size-limiting evicted the just-added transaction ("mempool full").
# 1049 : : bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
# 1050 : 20008 : {
# 1051 : 20008 : AssertLockHeld(cs_main);
# 1052 : 20008 : AssertLockHeld(m_pool.cs);
# 1053 : 20008 : const CTransaction& tx = *ws.m_ptx;
# 1054 : 20008 : const uint256& hash = ws.m_hash;
# 1055 : 20008 : TxValidationState& state = ws.m_state;
# 1056 : 20008 : const bool bypass_limits = args.m_bypass_limits;
# 1057 : :
# 1058 : 20008 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
# 1059 : :
# 1060 : : // Remove conflicting transactions from the mempool
# 1061 [ + + ]: 20008 : for (CTxMemPool::txiter it : ws.m_all_conflicting)
# 1062 : 1312 : {
# 1063 [ + - ]: 1312 : LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
# 1064 : 1312 : it->GetTx().GetHash().ToString(),
# 1065 : 1312 : hash.ToString(),
# 1066 : 1312 : FormatMoney(ws.m_modified_fees - ws.m_conflicting_fees),
# 1067 : 1312 : (int)entry->GetTxSize() - (int)ws.m_conflicting_size);
# 1068 : 1312 : ws.m_replaced_transactions.push_back(it->GetSharedTx());
# 1069 : 1312 : }
# 1070 : 20008 : m_pool.RemoveStaged(ws.m_all_conflicting, false, MemPoolRemovalReason::REPLACED);
# 1071 : :
# 1072 : : // This transaction should only count for fee estimation if:
# 1073 : : // - it's not being re-added during a reorg which bypasses typical mempool fee limits
# 1074 : : // - the node is not behind
# 1075 : : // - the transaction is not dependent on any other transactions in the mempool
# 1076 : : // - it's not part of a package. Since package relay is not currently supported, this
# 1077 : : // transaction has not necessarily been accepted to miners' mempools.
# 1078 [ + + ][ + + ]: 20008 : bool validForFeeEstimation = !bypass_limits && !args.m_package_submission && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
# [ + + ][ + + ]
# 1079 : :
# 1080 : : // Store transaction in memory
# 1081 : 20008 : m_pool.addUnchecked(*entry, ws.m_ancestors, validForFeeEstimation);
# 1082 : :
# 1083 : : // trim mempool and check if tx was trimmed
# 1084 : : // If we are validating a package, don't trim here because we could evict a previous transaction
# 1085 : : // in the package. LimitMempoolSize() should be called at the very end to make sure the mempool
# 1086 : : // is still within limits and package submission happens atomically.
# 1087 [ + + ][ + + ]: 20008 : if (!args.m_package_submission && !bypass_limits) {
# 1088 : 19702 : LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
# 1089 [ - + ]: 19702 : if (!m_pool.exists(GenTxid::Txid(hash)))
# 1090 : 0 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
# 1091 : 19702 : }
# 1092 : 20008 : return true;
# 1093 : 20008 : }
# 1094 : :
# : : // Submit a fully-validated package to the mempool: per-tx ConsensusScriptChecks +
# : : // Finalize, then a single mempool trim. Partial submission is possible; returns
# : : // true only if every transaction ends up in the mempool.
# 1095 : : bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
# 1096 : : PackageValidationState& package_state,
# 1097 : : std::map<const uint256, const MempoolAcceptResult>& results)
# 1098 : 6 : {
# 1099 : 6 : AssertLockHeld(cs_main);
# 1100 : 6 : AssertLockHeld(m_pool.cs);
# 1101 : : // Sanity check: none of the transactions should be in the mempool, and none of the transactions
# 1102 : : // should have a same-txid-different-witness equivalent in the mempool.
# 1103 : 6 : assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){
# 1104 : 6 : return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); }));
# 1105 : :
# 1106 : 0 : bool all_submitted = true;
# 1107 : : // ConsensusScriptChecks adds to the script cache and is therefore consensus-critical;
# 1108 : : // CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the
# 1109 : : // mempool or UTXO set. Submit each transaction to the mempool immediately after calling
# 1110 : : // ConsensusScriptChecks to make the outputs available for subsequent transactions.
# 1111 [ + + ]: 12 : for (Workspace& ws : workspaces) {
# 1112 [ - + ]: 12 : if (!ConsensusScriptChecks(args, ws)) {
# 1113 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1114 : : // Since PolicyScriptChecks() passed, this should never fail.
# 1115 : 0 : all_submitted = false;
# 1116 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1117 : 0 : strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
# 1118 : 0 : ws.m_ptx->GetHash().ToString()));
# 1119 : 0 : }
# 1120 : :
# 1121 : : // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
# 1122 : : // last calculation done in PreChecks, since package ancestors have already been submitted.
# 1123 : 12 : std::string unused_err_string;
# 1124 [ - + ]: 12 : if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors,
# 1125 : 12 : m_limit_ancestor_size, m_limit_descendants,
# 1126 : 12 : m_limit_descendant_size, unused_err_string)) {
# 1127 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1128 : : // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
# 1129 : 0 : all_submitted = false;
# 1130 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1131 : 0 : strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
# 1132 : 0 : ws.m_ptx->GetHash().ToString()));
# 1133 : 0 : }
# 1134 : : // If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
# 1135 : : // the transaction's descendant feerate into account because it hasn't seen them yet. Also,
# 1136 : : // we risk evicting a transaction that a subsequent package transaction depends on. Instead,
# 1137 : : // allow the mempool to temporarily bypass limits (by up to the maximum package size) while
# 1138 : : // submitting transactions individually and then trim at the very end.
# 1139 [ - + ]: 12 : if (!Finalize(args, ws)) {
# 1140 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1141 : : // Since LimitMempoolSize() won't be called, this should never fail.
# 1142 : 0 : all_submitted = false;
# 1143 : 0 : package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
# 1144 : 0 : strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
# 1145 : 0 : }
# 1146 : 12 : }
# 1147 : :
# 1148 : : // It may or may not be the case that all the transactions made it into the mempool. Regardless,
# 1149 : : // make sure we haven't exceeded max mempool size.
# 1150 : 6 : LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(),
# 1151 : 6 : gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
# 1152 : 6 : std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
# 1153 : :
# 1154 : : // Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
# 1155 : : // but don't report success unless they all made it into the mempool.
# 1156 [ + + ]: 12 : for (Workspace& ws : workspaces) {
# 1157 [ + - ]: 12 : if (m_pool.exists(GenTxid::Wtxid(ws.m_ptx->GetWitnessHash()))) {
# 1158 : 12 : results.emplace(ws.m_ptx->GetWitnessHash(),
# 1159 : 12 : MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees));
# 1160 : 12 : GetMainSignals().TransactionAddedToMempool(ws.m_ptx, m_pool.GetAndIncrementSequence());
# 1161 : 12 : } else {
# 1162 : 0 : all_submitted = false;
# 1163 : 0 : ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
# 1164 : 0 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1165 : 0 : }
# 1166 : 12 : }
# 1167 : 6 : return all_submitted;
# 1168 : 6 : }
# 1169 : :
# 1170 : : MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
# 1171 : 37562 : {
# 1172 : 37562 : AssertLockHeld(cs_main);
# 1173 : 37562 : LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
# 1174 : :
# 1175 : 37562 : Workspace ws(ptx);
# 1176 : :
# 1177 [ + + ]: 37562 : if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1178 : :
# 1179 [ + + ][ + + ]: 27453 : if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1180 : :
# 1181 : : // Perform the inexpensive checks first and avoid hashing and signature verification unless
# 1182 : : // those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
# 1183 [ + + ]: 27427 : if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1184 : :
# 1185 [ - + ]: 25164 : if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1186 : :
# 1187 : : // Tx was accepted, but not added
# 1188 [ + + ]: 25164 : if (args.m_test_accept) {
# 1189 : 5168 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
# 1190 : 5168 : }
# 1191 : :
# 1192 [ - + ]: 19996 : if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
# 1193 : :
# 1194 : 19996 : GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
# 1195 : :
# 1196 : 19996 : return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
# 1197 : 19996 : }
# 1198 : :
# 1199 : : PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
# 1200 : 78 : {
# 1201 : 78 : AssertLockHeld(cs_main);
# 1202 : :
# 1203 : : // These context-free package limits can be done before taking the mempool lock.
# 1204 : 78 : PackageValidationState package_state;
# 1205 [ + + ]: 78 : if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {});
# 1206 : :
# 1207 : 74 : std::vector<Workspace> workspaces{};
# 1208 : 74 : workspaces.reserve(txns.size());
# 1209 : 74 : std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
# 1210 : 584 : [](const auto& tx) { return Workspace(tx); });
# 1211 : 74 : std::map<const uint256, const MempoolAcceptResult> results;
# 1212 : :
# 1213 : 74 : LOCK(m_pool.cs);
# 1214 : :
# 1215 : : // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
# 1216 [ + + ]: 581 : for (Workspace& ws : workspaces) {
# 1217 [ + + ]: 581 : if (!PreChecks(args, ws)) {
# 1218 : 4 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1219 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1220 : 4 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1221 : 4 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1222 : 4 : }
# 1223 : : // Make the coins created by this transaction available for subsequent transactions in the
# 1224 : : // package to spend. Since we already checked conflicts in the package and we don't allow
# 1225 : : // replacements, we don't need to track the coins spent. Note that this logic will need to be
# 1226 : : // updated if package replace-by-fee is allowed in the future.
# 1227 : 577 : assert(!args.m_allow_bip125_replacement);
# 1228 : 0 : m_viewmempool.PackageAddTransaction(ws.m_ptx);
# 1229 : 577 : }
# 1230 : :
# 1231 : : // Transactions must meet two minimum feerates: the mempool minimum fee and min relay fee.
# 1232 : : // For transactions consisting of exactly one child and its parents, it suffices to use the
# 1233 : : // package feerate (total modified fees / total virtual size) to check this requirement.
# 1234 : 70 : const auto m_total_vsize = std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
# 1235 : 574 : [](int64_t sum, auto& ws) { return sum + ws.m_vsize; });
# 1236 : 70 : const auto m_total_modified_fees = std::accumulate(workspaces.cbegin(), workspaces.cend(), CAmount{0},
# 1237 : 574 : [](CAmount sum, auto& ws) { return sum + ws.m_modified_fees; });
# 1238 : 70 : const CFeeRate package_feerate(m_total_modified_fees, m_total_vsize);
# 1239 : 70 : TxValidationState placeholder_state;
# 1240 [ + + ]: 70 : if (args.m_package_feerates &&
# 1241 [ + + ]: 70 : !CheckFeeRate(m_total_vsize, m_total_modified_fees, placeholder_state)) {
# 1242 : 6 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-fee-too-low");
# 1243 : 6 : return PackageMempoolAcceptResult(package_state, package_feerate, {});
# 1244 : 6 : }
# 1245 : :
# 1246 : : // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
# 1247 : : // because it's unnecessary. Also, CPFP carve out can increase the limit for individual
# 1248 : : // transactions, but this exemption is not extended to packages in CheckPackageLimits().
# 1249 : 64 : std::string err_string;
# 1250 [ + - ][ + + ]: 64 : if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
# 1251 : 10 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1252 : 10 : }
# 1253 : :
# 1254 [ + + ]: 506 : for (Workspace& ws : workspaces) {
# 1255 [ + + ]: 506 : if (!PolicyScriptChecks(args, ws)) {
# 1256 : : // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
# 1257 : 1 : package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
# 1258 : 1 : results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
# 1259 : 1 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1260 : 1 : }
# 1261 [ + + ]: 505 : if (args.m_test_accept) {
# 1262 : : // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
# 1263 : : // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
# 1264 : 493 : results.emplace(ws.m_ptx->GetWitnessHash(),
# 1265 : 493 : MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions),
# 1266 : 493 : ws.m_vsize, ws.m_base_fees));
# 1267 : 493 : }
# 1268 : 505 : }
# 1269 : :
# 1270 [ + + ]: 53 : if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1271 : :
# 1272 [ - + ]: 6 : if (!SubmitPackage(args, workspaces, package_state, results)) {
# 1273 : : // PackageValidationState filled in by SubmitPackage().
# 1274 : 0 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1275 : 0 : }
# 1276 : :
# 1277 : 6 : return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
# 1278 : 6 : }
# 1279 : :
# 1280 : : PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, ATMPArgs& args)
# 1281 : 30 : {
# 1282 : 30 : AssertLockHeld(cs_main);
# 1283 : 30 : PackageValidationState package_state;
# 1284 : :
# 1285 : : // Check that the package is well-formed. If it isn't, we won't try to validate any of the
# 1286 : : // transactions and thus won't return any MempoolAcceptResults, just a package-wide error.
# 1287 : :
# 1288 : : // Context-free package checks.
# 1289 [ - + ]: 30 : if (!CheckPackage(package, package_state)) return PackageMempoolAcceptResult(package_state, {});
# 1290 : :
# 1291 : : // All transactions in the package must be a parent of the last transaction. This is just an
# 1292 : : // opportunity for us to fail fast on a context-free check without taking the mempool lock.
# 1293 [ + + ]: 30 : if (!IsChildWithParents(package)) {
# 1294 : 4 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents");
# 1295 : 4 : return PackageMempoolAcceptResult(package_state, {});
# 1296 : 4 : }
# 1297 : :
# 1298 : : // IsChildWithParents() guarantees the package is > 1 transactions.
# 1299 : 26 : assert(package.size() > 1);
# 1300 : : // The package must be 1 child with all of its unconfirmed parents. The package is expected to
# 1301 : : // be sorted, so the last transaction is the child.
# 1302 : 0 : const auto& child = package.back();
# 1303 : 26 : std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids;
# 1304 : 26 : std::transform(package.cbegin(), package.cend() - 1,
# 1305 : 26 : std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
# 1306 : 30 : [](const auto& tx) { return tx->GetHash(); });
# 1307 : :
# 1308 : : // All child inputs must refer to a preceding package transaction or a confirmed UTXO. The only
# 1309 : : // way to verify this is to look up the child's inputs in our current coins view (not including
# 1310 : : // mempool), and enforce that all parents not present in the package be available at chain tip.
# 1311 : : // Since this check can bring new coins into the coins cache, keep track of these coins and
# 1312 : : // uncache them if we don't end up submitting this package to the mempool.
# 1313 : 26 : const CCoinsViewCache& coins_tip_cache = m_active_chainstate.CoinsTip();
# 1314 [ + + ]: 32 : for (const auto& input : child->vin) {
# 1315 [ + - ]: 32 : if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
# 1316 : 32 : args.m_coins_to_uncache.push_back(input.prevout);
# 1317 : 32 : }
# 1318 : 32 : }
# 1319 : : // Using the MemPoolAccept m_view cache allows us to look up these same coins faster later.
# 1320 : : // This should be connecting directly to CoinsTip, not to m_viewmempool, because we specifically
# 1321 : : // require inputs to be confirmed if they aren't in the package.
# 1322 : 26 : m_view.SetBackend(m_active_chainstate.CoinsTip());
# 1323 : 32 : const auto package_or_confirmed = [this, &unconfirmed_parent_txids](const auto& input) {
# 1324 [ + + ][ - + ]: 32 : return unconfirmed_parent_txids.count(input.prevout.hash) > 0 || m_view.HaveCoin(input.prevout);
# 1325 : 32 : };
# 1326 [ + + ]: 26 : if (!std::all_of(child->vin.cbegin(), child->vin.cend(), package_or_confirmed)) {
# 1327 : 2 : package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-unconfirmed-parents");
# 1328 : 2 : return PackageMempoolAcceptResult(package_state, {});
# 1329 : 2 : }
# 1330 : : // Protect against bugs where we pull more inputs from disk that miss being added to
# 1331 : : // coins_to_uncache. The backend will be connected again when needed in PreChecks.
# 1332 : 24 : m_view.SetBackend(m_dummy);
# 1333 : :
# 1334 : 24 : LOCK(m_pool.cs);
# 1335 : 24 : std::map<const uint256, const MempoolAcceptResult> results;
# 1336 : : // Node operators are free to set their mempool policies however they please, nodes may receive
# 1337 : : // transactions in different orders, and malicious counterparties may try to take advantage of
# 1338 : : // policy differences to pin or delay propagation of transactions. As such, it's possible for
# 1339 : : // some package transaction(s) to already be in the mempool, and we don't want to reject the
# 1340 : : // entire package in that case (as that could be a censorship vector). De-duplicate the
# 1341 : : // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with
# 1342 : : // the new transactions. This ensures we don't double-count transaction counts and sizes when
# 1343 : : // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
# 1344 : 24 : ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
# 1345 : 24 : bool quit_early{false};
# 1346 : 24 : std::vector<CTransactionRef> txns_new;
# 1347 [ + + ]: 52 : for (const auto& tx : package) {
# 1348 : 52 : const auto& wtxid = tx->GetWitnessHash();
# 1349 : 52 : const auto& txid = tx->GetHash();
# 1350 : : // There are 3 possibilities: already in mempool, same-txid-diff-wtxid already in mempool,
# 1351 : : // or not in mempool. An already confirmed tx is treated as one not in mempool, because all
# 1352 : : // we know is that the inputs aren't available.
# 1353 [ + + ]: 52 : if (m_pool.exists(GenTxid::Wtxid(wtxid))) {
# 1354 : : // Exact transaction already exists in the mempool.
# 1355 : 12 : auto iter = m_pool.GetIter(txid);
# 1356 : 12 : assert(iter != std::nullopt);
# 1357 : 0 : results.emplace(wtxid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
# 1358 [ + + ]: 40 : } else if (m_pool.exists(GenTxid::Txid(txid))) {
# 1359 : : // Transaction with the same non-witness data but different witness (same txid,
# 1360 : : // different wtxid) already exists in the mempool.
# 1361 : : //
# 1362 : : // We don't allow replacement transactions right now, so just swap the package
# 1363 : : // transaction for the mempool one. Note that we are ignoring the validity of the
# 1364 : : // package transaction passed in.
# 1365 : : // TODO: allow witness replacement in packages.
# 1366 : 6 : auto iter = m_pool.GetIter(txid);
# 1367 : 6 : assert(iter != std::nullopt);
# 1368 : : // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
# 1369 : 0 : results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
# 1370 : 34 : } else {
# 1371 : : // Transaction does not already exist in the mempool.
# 1372 : : // Try submitting the transaction on its own.
# 1373 : 34 : const auto single_res = AcceptSingleTransaction(tx, single_args);
# 1374 [ + + ]: 34 : if (single_res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
# 1375 : : // The transaction succeeded on its own and is now in the mempool. Don't include it
# 1376 : : // in package validation, because its fees should only be "used" once.
# 1377 : 12 : assert(m_pool.exists(GenTxid::Wtxid(wtxid)));
# 1378 : 0 : results.emplace(wtxid, single_res);
# 1379 [ + + ]: 22 : } else if (single_res.m_state.GetResult() != TxValidationResult::TX_MEMPOOL_POLICY &&
# 1380 [ - + ]: 22 : single_res.m_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
# 1381 : : // Package validation policy only differs from individual policy in its evaluation
# 1382 : : // of feerate. For example, if a transaction fails here due to violation of a
# 1383 : : // consensus rule, the result will not change when it is submitted as part of a
# 1384 : : // package. To minimize the amount of repeated work, unless the transaction fails
# 1385 : : // due to feerate or missing inputs (its parent is a previous transaction in the
# 1386 : : // package that failed due to feerate), don't run package validation. Note that this
# 1387 : : // decision might not make sense if different types of packages are allowed in the
# 1388 : : // future. Continue individually validating the rest of the transactions, because
# 1389 : : // some of them may still be valid.
# 1390 : 0 : quit_early = true;
# 1391 : 22 : } else {
# 1392 : 22 : txns_new.push_back(tx);
# 1393 : 22 : }
# 1394 : 34 : }
# 1395 : 52 : }
# 1396 : :
# 1397 : : // Nothing to do if the entire package has already been submitted.
# 1398 [ - + ][ + + ]: 24 : if (quit_early || txns_new.empty()) {
# 1399 : : // No package feerate when no package validation was done.
# 1400 : 12 : return PackageMempoolAcceptResult(package_state, std::move(results));
# 1401 : 12 : }
# 1402 : : // Validate the (deduplicated) transactions as a package.
# 1403 : 12 : auto submission_result = AcceptMultipleTransactions(txns_new, args);
# 1404 : : // Include already-in-mempool transaction results in the final result.
# 1405 [ + + ]: 12 : for (const auto& [wtxid, mempoolaccept_res] : results) {
# 1406 : 6 : submission_result.m_tx_results.emplace(wtxid, mempoolaccept_res);
# 1407 : 6 : }
# 1408 [ + + ]: 12 : if (submission_result.m_state.IsValid()) assert(submission_result.m_package_feerate.has_value());
# 1409 : 0 : return submission_result;
# 1410 : 24 : }
# 1411 : :
# 1412 : : } // anon namespace
# 1413 : :
# 1414 : : MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, const CTransactionRef& tx,
# 1415 : : int64_t accept_time, bool bypass_limits, bool test_accept)
# 1416 : : EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 1417 : 37528 : {
# 1418 : 37528 : AssertLockHeld(::cs_main);
# 1419 : 37528 : const CChainParams& chainparams{active_chainstate.m_params};
# 1420 : 37528 : assert(active_chainstate.GetMempool() != nullptr);
# 1421 : 0 : CTxMemPool& pool{*active_chainstate.GetMempool()};
# 1422 : :
# 1423 : 37528 : std::vector<COutPoint> coins_to_uncache;
# 1424 : 37528 : auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, accept_time, bypass_limits, coins_to_uncache, test_accept);
# 1425 : 37528 : const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
# 1426 [ + + ]: 37528 : if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
# 1427 : : // Remove coins that were not present in the coins cache before calling
# 1428 : : // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
# 1429 : : // number of invalid transactions that attempt to overrun the in-memory coins cache
# 1430 : : // (`CCoinsViewCache::cacheCoins`).
# 1431 : :
# 1432 [ + + ]: 12376 : for (const COutPoint& hashTx : coins_to_uncache)
# 1433 : 4018 : active_chainstate.CoinsTip().Uncache(hashTx);
# 1434 : 12376 : }
# 1435 : : // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
# 1436 : 37528 : BlockValidationState state_dummy;
# 1437 : 37528 : active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
# 1438 : 37528 : return result;
# 1439 : 37528 : }
# 1440 : :
# 1441 : : PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTxMemPool& pool,
# 1442 : : const Package& package, bool test_accept)
# 1443 : 96 : {
# 1444 : 96 : AssertLockHeld(cs_main);
# 1445 : 96 : assert(!package.empty());
# 1446 : 0 : assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
# 1447 : :
# 1448 : 0 : std::vector<COutPoint> coins_to_uncache;
# 1449 : 96 : const CChainParams& chainparams = Params();
# 1450 : 96 : const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
# 1451 : 96 : AssertLockHeld(cs_main);
# 1452 [ + + ]: 96 : if (test_accept) {
# 1453 : 66 : auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
# 1454 : 66 : return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
# 1455 : 66 : } else {
# 1456 : 30 : auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache);
# 1457 : 30 : return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args);
# 1458 : 30 : }
# 1459 : 96 : }();
# 1460 : :
# 1461 : : // Uncache coins pertaining to transactions that were not submitted to the mempool.
# 1462 [ + + ][ + + ]: 96 : if (test_accept || result.m_state.IsInvalid()) {
# 1463 [ + + ]: 539 : for (const COutPoint& hashTx : coins_to_uncache) {
# 1464 : 539 : active_chainstate.CoinsTip().Uncache(hashTx);
# 1465 : 539 : }
# 1466 : 78 : }
# 1467 : : // Ensure the coins cache is still within limits.
# 1468 : 96 : BlockValidationState state_dummy;
# 1469 : 96 : active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
# 1470 : 96 : return result;
# 1471 : 96 : }
# 1472 : :
# 1473 : : CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
# 1474 : 133298 : {
# 1475 : 133298 : int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
# 1476 : : // Force block reward to zero when right shift is undefined.
# 1477 [ + + ]: 133298 : if (halvings >= 64)
# 1478 : 563 : return 0;
# 1479 : :
# 1480 : 132735 : CAmount nSubsidy = 50 * COIN;
# 1481 : : // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
# 1482 : 132735 : nSubsidy >>= halvings;
# 1483 : 132735 : return nSubsidy;
# 1484 : 133298 : }
# 1485 : :
# 1486 : : CoinsViews::CoinsViews(
# 1487 : : std::string ldb_name,
# 1488 : : size_t cache_size_bytes,
# 1489 : : bool in_memory,
# 1490 : : bool should_wipe) : m_dbview(
# 1491 : : gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
# 1492 : 949 : m_catcherview(&m_dbview) {}
# 1493 : :
# 1494 : : void CoinsViews::InitCache()
# 1495 : 948 : {
# 1496 : 948 : AssertLockHeld(::cs_main);
# 1497 : 948 : m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
# 1498 : 948 : }
# 1499 : :
# 1500 : : CChainState::CChainState(
# 1501 : : CTxMemPool* mempool,
# 1502 : : BlockManager& blockman,
# 1503 : : ChainstateManager& chainman,
# 1504 : : std::optional<uint256> from_snapshot_blockhash)
# 1505 : : : m_mempool(mempool),
# 1506 : : m_blockman(blockman),
# 1507 : : m_params(::Params()),
# 1508 : : m_chainman(chainman),
# 1509 : 955 : m_from_snapshot_blockhash(from_snapshot_blockhash) {}
# 1510 : :
# 1511 : : void CChainState::InitCoinsDB(
# 1512 : : size_t cache_size_bytes,
# 1513 : : bool in_memory,
# 1514 : : bool should_wipe,
# 1515 : : std::string leveldb_name)
# 1516 : 949 : {
# 1517 [ + + ]: 949 : if (m_from_snapshot_blockhash) {
# 1518 : 10 : leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
# 1519 : 10 : }
# 1520 : :
# 1521 : 949 : m_coins_views = std::make_unique<CoinsViews>(
# 1522 : 949 : leveldb_name, cache_size_bytes, in_memory, should_wipe);
# 1523 : 949 : }
# 1524 : :
# 1525 : : void CChainState::InitCoinsCache(size_t cache_size_bytes)
# 1526 : 948 : {
# 1527 : 948 : AssertLockHeld(::cs_main);
# 1528 : 948 : assert(m_coins_views != nullptr);
# 1529 : 0 : m_coinstip_cache_size_bytes = cache_size_bytes;
# 1530 : 948 : m_coins_views->InitCache();
# 1531 : 948 : }
# 1532 : :
# 1533 : : // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
# 1534 : : // is a performance-related implementation detail. This function must be marked
# 1535 : : // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
# 1536 : : // can call it.
# 1537 : : //
# 1538 : : bool CChainState::IsInitialBlockDownload() const
# 1539 : 1134491 : {
# 1540 : : // Optimization: pre-test latch before taking the lock.
# 1541 [ + + ]: 1134491 : if (m_cached_finished_ibd.load(std::memory_order_relaxed))
# 1542 : 1079879 : return false;
# 1543 : :
# 1544 : 54612 : LOCK(cs_main);
# 1545 [ - + ]: 54612 : if (m_cached_finished_ibd.load(std::memory_order_relaxed))
# 1546 : 0 : return false;
# 1547 [ + + ][ - + ]: 54612 : if (fImporting || fReindex)
# 1548 : 9730 : return true;
# 1549 [ + + ]: 44882 : if (m_chain.Tip() == nullptr)
# 1550 : 2 : return true;
# 1551 [ + + ]: 44880 : if (m_chain.Tip()->nChainWork < nMinimumChainWork)
# 1552 : 3336 : return true;
# 1553 [ + + ]: 41544 : if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
# 1554 : 40952 : return true;
# 1555 : 592 : LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
# 1556 : 592 : m_cached_finished_ibd.store(true, std::memory_order_relaxed);
# 1557 : 592 : return false;
# 1558 : 41544 : }
# 1559 : :
# 1560 : : static void AlertNotify(const std::string& strMessage)
# 1561 : 2 : {
# 1562 : 2 : uiInterface.NotifyAlertChanged();
# 1563 : 2 : #if HAVE_SYSTEM
# 1564 : 2 : std::string strCmd = gArgs.GetArg("-alertnotify", "");
# 1565 [ - + ]: 2 : if (strCmd.empty()) return;
# 1566 : :
# 1567 : : // Alert text should be plain ascii coming from a trusted source, but to
# 1568 : : // be safe we first strip anything not in safeChars, then add single quotes around
# 1569 : : // the whole string before passing it to the shell:
# 1570 : 2 : std::string singleQuote("'");
# 1571 : 2 : std::string safeStatus = SanitizeString(strMessage);
# 1572 : 2 : safeStatus = singleQuote+safeStatus+singleQuote;
# 1573 : 2 : boost::replace_all(strCmd, "%s", safeStatus);
# 1574 : :
# 1575 : 2 : std::thread t(runCommand, strCmd);
# 1576 : 2 : t.detach(); // thread runs free
# 1577 : 2 : #endif
# 1578 : 2 : }
# 1579 : :
# 1580 : : void CChainState::CheckForkWarningConditions()
# 1581 : 71081 : {
# 1582 : 71081 : AssertLockHeld(cs_main);
# 1583 : :
# 1584 : : // Before we get past initial download, we cannot reliably alert about forks
# 1585 : : // (we assume we don't get stuck on a fork before finishing our initial sync)
# 1586 [ + + ]: 71081 : if (IsInitialBlockDownload()) {
# 1587 : 5520 : return;
# 1588 : 5520 : }
# 1589 : :
# 1590 [ + + ][ + + ]: 65561 : if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
# [ + + ]
# 1591 : 23 : LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
# 1592 : 23 : SetfLargeWorkInvalidChainFound(true);
# 1593 : 65538 : } else {
# 1594 : 65538 : SetfLargeWorkInvalidChainFound(false);
# 1595 : 65538 : }
# 1596 : 65561 : }
# 1597 : :
# 1598 : : // Called both upon regular invalid block discovery *and* InvalidateBlock
# 1599 : : void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
# 1600 : 8070 : {
# 1601 : 8070 : AssertLockHeld(cs_main);
# 1602 [ + + ][ + + ]: 8070 : if (!m_chainman.m_best_invalid || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
# 1603 : 2770 : m_chainman.m_best_invalid = pindexNew;
# 1604 : 2770 : }
# 1605 [ + - ][ + + ]: 8070 : if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
# 1606 : 4063 : pindexBestHeader = m_chain.Tip();
# 1607 : 4063 : }
# 1608 : :
# 1609 : 8070 : LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
# 1610 : 8070 : pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
# 1611 : 8070 : log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
# 1612 : 8070 : CBlockIndex *tip = m_chain.Tip();
# 1613 : 8070 : assert (tip);
# 1614 : 8070 : LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
# 1615 : 8070 : tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
# 1616 : 8070 : FormatISO8601DateTime(tip->GetBlockTime()));
# 1617 : 8070 : CheckForkWarningConditions();
# 1618 : 8070 : }
# 1619 : :
# 1620 : : // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
# 1621 : : // which does its own setBlockIndexCandidates management.
# 1622 : : void CChainState::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state)
# 1623 : 3993 : {
# 1624 : 3993 : AssertLockHeld(cs_main);
# 1625 [ + - ]: 3993 : if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 1626 : 3993 : pindex->nStatus |= BLOCK_FAILED_VALID;
# 1627 : 3993 : m_chainman.m_failed_blocks.insert(pindex);
# 1628 : 3993 : m_blockman.m_dirty_blockindex.insert(pindex);
# 1629 : 3993 : setBlockIndexCandidates.erase(pindex);
# 1630 : 3993 : InvalidChainFound(pindex);
# 1631 : 3993 : }
# 1632 : 3993 : }
# 1633 : :
# 1634 : : void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
# 1635 : 226872 : {
# 1636 : : // mark inputs spent
# 1637 [ + + ]: 226872 : if (!tx.IsCoinBase()) {
# 1638 : 124357 : txundo.vprevout.reserve(tx.vin.size());
# 1639 [ + + ]: 161374 : for (const CTxIn &txin : tx.vin) {
# 1640 : 161374 : txundo.vprevout.emplace_back();
# 1641 : 161374 : bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
# 1642 : 161374 : assert(is_spent);
# 1643 : 161374 : }
# 1644 : 124357 : }
# 1645 : : // add outputs
# 1646 : 226872 : AddCoins(inputs, tx, nHeight);
# 1647 : 226872 : }
# 1648 : :
# 1649 : 390548 : bool CScriptCheck::operator()() {
# 1650 : 390548 : const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
# 1651 : 390548 : const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
# 1652 : 390548 : return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
# 1653 : 390548 : }
# 1654 : :
# 1655 : : static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
# 1656 : : static CSHA256 g_scriptExecutionCacheHasher;
# 1657 : :
# 1658 : 1631 : void InitScriptExecutionCache() {
# 1659 : : // Setup the salted hasher
# 1660 : 1631 : uint256 nonce = GetRandHash();
# 1661 : : // We want the nonce to be 64 bytes long to force the hasher to process
# 1662 : : // this chunk, which makes later hash computations more efficient. We
# 1663 : : // just write our 32-byte entropy twice to fill the 64 bytes.
# 1664 : 1631 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
# 1665 : 1631 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
# 1666 : : // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
# 1667 : : // setup_bytes creates the minimum possible cache (2 elements).
# 1668 : 1631 : size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetIntArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
# 1669 : 1631 : size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
# 1670 : 1631 : LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
# 1671 : 1631 : (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
# 1672 : 1631 : }
# 1673 : :
/**
 * Check whether all of this transaction's input scripts succeed.
 *
 * This involves ECDSA signature checks so can be computationally intensive. This function should
 * only be called after the cheap sanity checks in CheckTxInputs passed.
 *
 * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
 * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
 * not pushed onto pvChecks/run.
 *
 * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
 * which are matched. This is useful for checking blocks where we will likely never need the cache
 * entry again.
 *
 * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking
 * callers should probably reset it to CONSENSUS in such cases.
 *
 * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
 */
bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
                       const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
                       bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
                       std::vector<CScriptCheck>* pvChecks)
{
    // Coinbase transactions have no input scripts to verify.
    if (tx.IsCoinBase()) return true;

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same
    // flags. Note that this assumes that the inputs provided are
    // correct (ie that the transaction hash which is in tx's prevouts
    // properly commits to the scriptPubKey in the inputs view of that
    // transaction).
    // The cache key commits to the witness hash AND the verification flags,
    // so a hit is only valid for the exact same flag set.
    uint256 hashCacheEntry;
    CSHA256 hasher = g_scriptExecutionCacheHasher;
    hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
    AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
    if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
        return true;
    }

    // Lazily gather the spent outputs for this transaction if the caller did
    // not precompute them; every prevout must still be unspent in `inputs`.
    if (!txdata.m_spent_outputs_ready) {
        std::vector<CTxOut> spent_outputs;
        spent_outputs.reserve(tx.vin.size());

        for (const auto& txin : tx.vin) {
            const COutPoint& prevout = txin.prevout;
            const Coin& coin = inputs.AccessCoin(prevout);
            assert(!coin.IsSpent());
            spent_outputs.emplace_back(coin.out);
        }
        txdata.Init(tx, std::move(spent_outputs));
    }
    assert(txdata.m_spent_outputs.size() == tx.vin.size());

    for (unsigned int i = 0; i < tx.vin.size(); i++) {

        // We very carefully only pass in things to CScriptCheck which
        // are clearly committed to by tx' witness hash. This provides
        // a sanity check that our caching is not introducing consensus
        // failures through additional data in, eg, the coins being
        // spent being checked as a part of CScriptCheck.

        // Verify signature
        CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
        if (pvChecks) {
            // Defer execution: hand the check to the caller (e.g. the
            // parallel script-check queue) instead of running it inline.
            pvChecks->push_back(CScriptCheck());
            check.swap(pvChecks->back());
        } else if (!check()) {
            if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                // Check whether the failure was caused by a
                // non-mandatory script verification check, such as
                // non-standard DER encodings or non-null dummy
                // arguments; if so, ensure we return NOT_STANDARD
                // instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and
                // non-upgraded nodes by banning CONSENSUS-failing
                // data providers.
                CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
                        flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
                if (check2())
                    return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
            }
            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
            // failures are the most serious case of validation
            // failures, we may need to consider using
            // RECENT_CONSENSUS_CHANGE for any script failure that
            // could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this
            // depends on the details of how net_processing handles
            // such errors).
            return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
        }
    }

    if (cacheFullScriptStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to
        // cache the result. Do so now.
        g_scriptExecutionCache.insert(hashCacheEntry);
    }

    return true;
}
# 1780 : :
// Abort the node with the given message, and additionally record the message
// as a fatal Error on the supplied validation state (always returns false).
bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
{
    // Trigger the node-wide shutdown/alert path first, then mirror the same
    // message into the caller's validation state.
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
# 1786 : :
# 1787 : : /**
# 1788 : : * Restore the UTXO in a Coin at a given COutPoint
# 1789 : : * @param undo The Coin to be restored.
# 1790 : : * @param view The coins view to which to apply the changes.
# 1791 : : * @param out The out point that corresponds to the tx input.
# 1792 : : * @return A DisconnectResult as an int
# 1793 : : */
# 1794 : : int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
# 1795 : 19694 : {
# 1796 : 19694 : bool fClean = true;
# 1797 : :
# 1798 [ + + ]: 19694 : if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
# 1799 : :
# 1800 [ - + ]: 19694 : if (undo.nHeight == 0) {
# 1801 : : // Missing undo metadata (height and coinbase). Older versions included this
# 1802 : : // information only in undo records for the last spend of a transactions'
# 1803 : : // outputs. This implies that it must be present for some other output of the same tx.
# 1804 : 0 : const Coin& alternate = AccessByTxid(view, out.hash);
# 1805 [ # # ]: 0 : if (!alternate.IsSpent()) {
# 1806 : 0 : undo.nHeight = alternate.nHeight;
# 1807 : 0 : undo.fCoinBase = alternate.fCoinBase;
# 1808 : 0 : } else {
# 1809 : 0 : return DISCONNECT_FAILED; // adding output for transaction without known metadata
# 1810 : 0 : }
# 1811 : 0 : }
# 1812 : : // If the coin already exists as an unspent coin in the cache, then the
# 1813 : : // possible_overwrite parameter to AddCoin must be set to true. We have
# 1814 : : // already checked whether an unspent coin exists above using HaveCoin, so
# 1815 : : // we don't need to guess. When fClean is false, an unspent coin already
# 1816 : : // existed and it is an overwrite.
# 1817 : 19694 : view.AddCoin(out, std::move(undo), !fClean);
# 1818 : :
# 1819 [ + + ]: 19694 : return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
# 1820 : 19694 : }
# 1821 : :
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
 *  When FAILED is returned, view is left in an indeterminate state. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
    AssertLockHeld(::cs_main);
    bool fClean = true;

    // The spent-coin (undo) data for this block must be readable from disk.
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DISCONNECT_FAILED;
    }

    // Exactly one undo record per transaction except the coinbase,
    // which spends no inputs and therefore has no record.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DISCONNECT_FAILED;
    }

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();
        bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
                COutPoint out(hash, o);
                Coin coin;
                bool is_spent = view.SpendCoin(out, &coin);
                // Any mismatch (missing coin, different output, wrong height
                // or coinbase flag) marks the disconnect unclean but does not
                // abort it.
                if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
                    fClean = false; // transaction output mismatch
                }
            }
        }

        // restore inputs
        if (i > 0) { // not coinbases
            CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size()) {
                error("DisconnectBlock(): transaction and undo data inconsistent");
                return DISCONNECT_FAILED;
            }
            // Restore prevouts in reverse input order.
            for (unsigned int j = tx.vin.size(); j > 0;) {
                --j;
                const COutPoint& out = tx.vin[j].prevout;
                int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
                if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
                fClean = fClean && res != DISCONNECT_UNCLEAN;
            }
            // At this point, all of txundo.vprevout should have been moved out.
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
# 1882 : :
// Global queue used to run transaction script checks on worker threads during
// block connection (see CCheckQueue for the meaning of the 128 argument).
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
# 1884 : :
// Start the requested number of worker threads servicing the global
// script-check queue.
void StartScriptCheckWorkerThreads(int threads_num)
{
    scriptcheckqueue.StartWorkerThreads(threads_num);
}
# 1889 : :
// Stop all worker threads of the global script-check queue (called on
// shutdown).
void StopScriptCheckWorkerThreads()
{
    scriptcheckqueue.StopWorkerThreads();
}
# 1894 : :
# 1895 : : /**
# 1896 : : * Threshold condition checker that triggers when unknown versionbits are seen on the network.
# 1897 : : */
# 1898 : : class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
# 1899 : : {
# 1900 : : private:
# 1901 : : int bit;
# 1902 : :
# 1903 : : public:
# 1904 : 1720251 : explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
# 1905 : :
# 1906 : 1720251 : int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
# 1907 : 1720251 : int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
# 1908 : 1720251 : int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
# 1909 : 1720251 : int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
# 1910 : :
# 1911 : : bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
# 1912 : 780624 : {
# 1913 [ + - ]: 780624 : return pindex->nHeight >= params.MinBIP9WarningHeight &&
# 1914 [ + + ]: 780624 : ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
# 1915 [ + + ]: 780624 : ((pindex->nVersion >> bit) & 1) != 0 &&
# 1916 [ + + ]: 780624 : ((g_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
# 1917 : 780624 : }
# 1918 : : };
# 1919 : :
// Per-bit threshold-state caches for WarningBitsConditionChecker, guarded by cs_main.
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
# 1921 : :
# 1922 : : static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Consensus::Params& consensusparams)
# 1923 : 119155 : {
# 1924 : : // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
# 1925 : : // retroactively applied to testnet)
# 1926 : : // However, only one historical block violated the P2SH rules (on both
# 1927 : : // mainnet and testnet).
# 1928 : : // Similarly, only one historical block violated the TAPROOT rules on
# 1929 : : // mainnet.
# 1930 : : // For simplicity, always leave P2SH+WITNESS+TAPROOT on except for the two
# 1931 : : // violating blocks.
# 1932 : 119155 : uint32_t flags{SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_TAPROOT};
# 1933 : 119155 : const auto it{consensusparams.script_flag_exceptions.find(*Assert(block_index.phashBlock))};
# 1934 [ - + ]: 119155 : if (it != consensusparams.script_flag_exceptions.end()) {
# 1935 : 0 : flags = it->second;
# 1936 : 0 : }
# 1937 : :
# 1938 : : // Enforce the DERSIG (BIP66) rule
# 1939 [ + + ]: 119155 : if (DeploymentActiveAt(block_index, consensusparams, Consensus::DEPLOYMENT_DERSIG)) {
# 1940 : 117873 : flags |= SCRIPT_VERIFY_DERSIG;
# 1941 : 117873 : }
# 1942 : :
# 1943 : : // Enforce CHECKLOCKTIMEVERIFY (BIP65)
# 1944 [ + + ]: 119155 : if (DeploymentActiveAt(block_index, consensusparams, Consensus::DEPLOYMENT_CLTV)) {
# 1945 : 118669 : flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
# 1946 : 118669 : }
# 1947 : :
# 1948 : : // Enforce CHECKSEQUENCEVERIFY (BIP112)
# 1949 [ + + ]: 119155 : if (DeploymentActiveAt(block_index, consensusparams, Consensus::DEPLOYMENT_CSV)) {
# 1950 : 117069 : flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
# 1951 : 117069 : }
# 1952 : :
# 1953 : : // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit)
# 1954 [ + + ]: 119155 : if (DeploymentActiveAt(block_index, consensusparams, Consensus::DEPLOYMENT_SEGWIT)) {
# 1955 : 116510 : flags |= SCRIPT_VERIFY_NULLDUMMY;
# 1956 : 116510 : }
# 1957 : :
# 1958 : 119155 : return flags;
# 1959 : 119155 : }
# 1960 : :
# 1961 : :
// Cumulative benchmark counters (microseconds unless noted) used by the
// BCLog::BENCH log lines emitted while connecting blocks.
static int64_t nTimeCheck = 0;   // time spent in sanity (CheckBlock) phase
static int64_t nTimeForks = 0;   // time spent determining fork/script flags
static int64_t nTimeConnect = 0; // time spent connecting transactions
static int64_t nTimeVerify = 0;  // time spent verifying input scripts
static int64_t nTimeUndo = 0;    // time spent writing undo data
static int64_t nTimeIndex = 0;   // time spent updating indexes
static int64_t nTimeTotal = 0;   // total time across phases
static int64_t nBlocksTotal = 0; // number of blocks connected (for per-block averages)
# 1970 : :
# 1971 : : /** Apply the effects of this block (with given index) on the UTXO set represented by coins.
# 1972 : : * Validity checks that depend on the UTXO set are also done; ConnectBlock()
# 1973 : : * can fail if those validity checks fail (among other reasons). */
# 1974 : : bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
# 1975 : : CCoinsViewCache& view, bool fJustCheck)
# 1976 : 94450 : {
# 1977 : 94450 : AssertLockHeld(cs_main);
# 1978 : 94450 : assert(pindex);
# 1979 : :
# 1980 : 0 : uint256 block_hash{block.GetHash()};
# 1981 : 94450 : assert(*pindex->phashBlock == block_hash);
# 1982 : :
# 1983 : 0 : int64_t nTimeStart = GetTimeMicros();
# 1984 : :
# 1985 : : // Check it again in case a previous version let a bad block in
# 1986 : : // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
# 1987 : : // ContextualCheckBlockHeader() here. This means that if we add a new
# 1988 : : // consensus rule that is enforced in one of those two functions, then we
# 1989 : : // may have let in a block that violates the rule prior to updating the
# 1990 : : // software, and we would NOT be enforcing the rule here. Fully solving
# 1991 : : // upgrade from one software version to the next after a consensus rule
# 1992 : : // change is potentially tricky and issue-specific (see NeedsRedownload()
# 1993 : : // for one approach that was used for BIP 141 deployment).
# 1994 : : // Also, currently the rule against blocks more than 2 hours in the future
# 1995 : : // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
# 1996 : : // re-enforce that rule here (at least until we make it impossible for
# 1997 : : // GetAdjustedTime() to go backward).
# 1998 [ - + ]: 94450 : if (!CheckBlock(block, state, m_params.GetConsensus(), !fJustCheck, !fJustCheck)) {
# 1999 [ # # ]: 0 : if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
# 2000 : : // We don't write down blocks to disk if they may have been
# 2001 : : // corrupted, so this should be impossible unless we're having hardware
# 2002 : : // problems.
# 2003 : 0 : return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
# 2004 : 0 : }
# 2005 : 0 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 2006 : 0 : }
# 2007 : :
# 2008 : : // verify that the view's current state corresponds to the previous block
# 2009 [ + + ]: 94450 : uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
# 2010 : 94450 : assert(hashPrevBlock == view.GetBestBlock());
# 2011 : :
# 2012 : 0 : nBlocksTotal++;
# 2013 : :
# 2014 : : // Special case for the genesis block, skipping connection of its transactions
# 2015 : : // (its coinbase is unspendable)
# 2016 [ + + ]: 94450 : if (block_hash == m_params.GetConsensus().hashGenesisBlock) {
# 2017 [ + - ]: 470 : if (!fJustCheck)
# 2018 : 470 : view.SetBestBlock(pindex->GetBlockHash());
# 2019 : 470 : return true;
# 2020 : 470 : }
# 2021 : :
# 2022 : 93980 : bool fScriptChecks = true;
# 2023 [ + + ]: 93980 : if (!hashAssumeValid.IsNull()) {
# 2024 : : // We've been configured with the hash of a block which has been externally verified to have a valid history.
# 2025 : : // A suitable default value is included with the software and updated from time to time. Because validity
# 2026 : : // relative to a piece of software is an objective fact these defaults can be easily reviewed.
# 2027 : : // This setting doesn't force the selection of any particular chain but makes validating some faster by
# 2028 : : // effectively caching the result of part of the verification.
# 2029 : 2568 : BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
# 2030 [ + + ]: 2568 : if (it != m_blockman.m_block_index.end()) {
# 2031 [ + + ]: 2304 : if (it->second.GetAncestor(pindex->nHeight) == pindex &&
# 2032 [ + - ]: 2304 : pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
# 2033 [ + - ]: 2304 : pindexBestHeader->nChainWork >= nMinimumChainWork) {
# 2034 : : // This block is a member of the assumed verified chain and an ancestor of the best header.
# 2035 : : // Script verification is skipped when connecting blocks under the
# 2036 : : // assumevalid block. Assuming the assumevalid block is valid this
# 2037 : : // is safe because block merkle hashes are still computed and checked,
# 2038 : : // Of course, if an assumed valid block is invalid due to false scriptSigs
# 2039 : : // this optimization would allow an invalid chain to be accepted.
# 2040 : : // The equivalent time check discourages hash power from extorting the network via DOS attack
# 2041 : : // into accepting an invalid block through telling users they must manually set assumevalid.
# 2042 : : // Requiring a software change or burying the invalid block, regardless of the setting, makes
# 2043 : : // it hard to hide the implication of the demand. This also avoids having release candidates
# 2044 : : // that are hardly doing any signature verification at all in testing without having to
# 2045 : : // artificially set the default assumed verified block further back.
# 2046 : : // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
# 2047 : : // least as good as the expected chain.
# 2048 : 204 : fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
# 2049 : 204 : }
# 2050 : 2304 : }
# 2051 : 2568 : }
# 2052 : :
# 2053 : 93980 : int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
# 2054 [ + - ]: 93980 : LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
# 2055 : :
# 2056 : : // Do not allow blocks that contain transactions which 'overwrite' older transactions,
# 2057 : : // unless those are already completely spent.
# 2058 : : // If such overwrites are allowed, coinbases and transactions depending upon those
# 2059 : : // can be duplicated to remove the ability to spend the first instance -- even after
# 2060 : : // being sent to another address.
# 2061 : : // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
# 2062 : : // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
# 2063 : : // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
# 2064 : : // two in the chain that violate it. This prevents exploiting the issue against nodes during their
# 2065 : : // initial block download.
# 2066 [ - + ][ # # ]: 93980 : bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
# 2067 [ - + ][ # # ]: 93980 : (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
# 2068 : :
# 2069 : : // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
# 2070 : : // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
# 2071 : : // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
# 2072 : : // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
# 2073 : : // duplicate transactions descending from the known pairs either.
# 2074 : : // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
# 2075 : :
# 2076 : : // BIP34 requires that a block at height X (block X) has its coinbase
# 2077 : : // scriptSig start with a CScriptNum of X (indicated height X). The above
# 2078 : : // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
# 2079 : : // case that there is a block X before the BIP34 height of 227,931 which has
# 2080 : : // an indicated height Y where Y is greater than X. The coinbase for block
# 2081 : : // X would also be a valid coinbase for block Y, which could be a BIP30
# 2082 : : // violation. An exhaustive search of all mainnet coinbases before the
# 2083 : : // BIP34 height which have an indicated height greater than the block height
# 2084 : : // reveals many occurrences. The 3 lowest indicated heights found are
# 2085 : : // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
# 2086 : : // heights would be the first opportunity for BIP30 to be violated.
# 2087 : :
# 2088 : : // The search reveals a great many blocks which have an indicated height
# 2089 : : // greater than 1,983,702, so we simply remove the optimization to skip
# 2090 : : // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
# 2091 : : // that block in another 25 years or so, we should take advantage of a
# 2092 : : // future consensus change to do a new and improved version of BIP34 that
# 2093 : : // will actually prevent ever creating any duplicate coinbases in the
# 2094 : : // future.
# 2095 : 93980 : static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
# 2096 : :
# 2097 : : // There is no potential to create a duplicate coinbase at block 209,921
# 2098 : : // because this is still before the BIP34 height and so explicit BIP30
# 2099 : : // checking is still active.
# 2100 : :
# 2101 : : // The final case is block 176,684 which has an indicated height of
# 2102 : : // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
# 2103 : : // before block 490,897 so there was not much opportunity to address this
# 2104 : : // case other than to carefully analyze it and determine it would not be a
# 2105 : : // problem. Block 490,897 was, in fact, mined with a different coinbase than
# 2106 : : // block 176,684, but it is important to note that even if it hadn't been or
# 2107 : : // is remined on an alternate fork with a duplicate coinbase, we would still
# 2108 : : // not run into a BIP30 violation. This is because the coinbase for 176,684
# 2109 : : // is spent in block 185,956 in transaction
# 2110 : : // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
# 2111 : : // spending transaction can't be duplicated because it also spends coinbase
# 2112 : : // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
# 2113 : : // coinbase has an indicated height of over 4.2 billion, and wouldn't be
# 2114 : : // duplicatable until that height, and it's currently impossible to create a
# 2115 : : // chain that long. Nevertheless we may wish to consider a future soft fork
# 2116 : : // which retroactively prevents block 490,897 from creating a duplicate
# 2117 : : // coinbase. The two historical BIP30 violations often provide a confusing
# 2118 : : // edge case when manipulating the UTXO and it would be simpler not to have
# 2119 : : // another edge case to deal with.
# 2120 : :
# 2121 : : // testnet3 has no blocks before the BIP34 height with indicated heights
# 2122 : : // post BIP34 before approximately height 486,000,000. After block
# 2123 : : // 1,983,702 testnet3 starts doing unnecessary BIP30 checking again.
# 2124 : 93980 : assert(pindex->pprev);
# 2125 : 0 : CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(m_params.GetConsensus().BIP34Height);
# 2126 : : //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
# 2127 [ + - ][ + + ]: 93980 : fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == m_params.GetConsensus().BIP34Hash));
# [ + - ]
# 2128 : :
# 2129 : : // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
# 2130 : : // consensus change that ensures coinbases at those heights cannot
# 2131 : : // duplicate earlier coinbases.
# 2132 [ + - ][ # # ]: 93980 : if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
# 2133 [ + + ]: 155485 : for (const auto& tx : block.vtx) {
# 2134 [ + + ]: 647461 : for (size_t o = 0; o < tx->vout.size(); o++) {
# 2135 [ + + ]: 491977 : if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
# 2136 : 1 : LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
# 2137 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
# 2138 : 1 : }
# 2139 : 491977 : }
# 2140 : 155485 : }
# 2141 : 93980 : }
# 2142 : :
# 2143 : : // Enforce BIP68 (sequence locks)
# 2144 : 93979 : int nLockTimeFlags = 0;
# 2145 [ + + ]: 93979 : if (DeploymentActiveAt(*pindex, m_params.GetConsensus(), Consensus::DEPLOYMENT_CSV)) {
# 2146 : 92467 : nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
# 2147 : 92467 : }
# 2148 : :
# 2149 : : // Get the script flags for this block
# 2150 : 93979 : unsigned int flags{GetBlockScriptFlags(*pindex, m_params.GetConsensus())};
# 2151 : :
# 2152 : 93979 : int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
# 2153 [ + - ]: 93979 : LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
# 2154 : :
# 2155 : 93979 : CBlockUndo blockundo;
# 2156 : :
# 2157 : : // Precomputed transaction data pointers must not be invalidated
# 2158 : : // until after `control` has run the script checks (potentially
# 2159 : : // in multiple threads). Preallocate the vector size so a new allocation
# 2160 : : // doesn't invalidate pointers into the vector, and keep txsdata in scope
# 2161 : : // for as long as `control`.
# 2162 [ + + ][ + + ]: 93979 : CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
# 2163 : 93979 : std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
# 2164 : :
# 2165 : 93979 : std::vector<int> prevheights;
# 2166 : 93979 : CAmount nFees = 0;
# 2167 : 93979 : int nInputs = 0;
# 2168 : 93979 : int64_t nSigOpsCost = 0;
# 2169 : 93979 : blockundo.vtxundo.reserve(block.vtx.size() - 1);
# 2170 [ + + ]: 245481 : for (unsigned int i = 0; i < block.vtx.size(); i++)
# 2171 : 155480 : {
# 2172 : 155480 : const CTransaction &tx = *(block.vtx[i]);
# 2173 : :
# 2174 : 155480 : nInputs += tx.vin.size();
# 2175 : :
# 2176 [ + + ]: 155480 : if (!tx.IsCoinBase())
# 2177 : 61501 : {
# 2178 : 61501 : CAmount txfee = 0;
# 2179 : 61501 : TxValidationState tx_state;
# 2180 [ + + ]: 61501 : if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
# 2181 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2182 : 30 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2183 : 30 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2184 : 30 : return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
# 2185 : 30 : }
# 2186 : 61471 : nFees += txfee;
# 2187 [ - + ]: 61471 : if (!MoneyRange(nFees)) {
# 2188 : 0 : LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
# 2189 : 0 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
# 2190 : 0 : }
# 2191 : :
# 2192 : : // Check that transaction is BIP68 final
# 2193 : : // BIP68 lock checks (as opposed to nLockTime checks) must
# 2194 : : // be in ConnectBlock because they require the UTXO set
# 2195 : 61471 : prevheights.resize(tx.vin.size());
# 2196 [ + + ]: 165040 : for (size_t j = 0; j < tx.vin.size(); j++) {
# 2197 : 103569 : prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
# 2198 : 103569 : }
# 2199 : :
# 2200 [ + + ]: 61471 : if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
# 2201 : 12 : LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
# 2202 : 12 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
# 2203 : 12 : }
# 2204 : 61471 : }
# 2205 : :
# 2206 : : // GetTransactionSigOpCost counts 3 types of sigops:
# 2207 : : // * legacy (always)
# 2208 : : // * p2sh (when P2SH enabled in flags and excludes coinbase)
# 2209 : : // * witness (when witness enabled in flags and excludes coinbase)
# 2210 : 155438 : nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
# 2211 [ + + ]: 155438 : if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
# 2212 : 4 : LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
# 2213 : 4 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
# 2214 : 4 : }
# 2215 : :
# 2216 [ + + ]: 155434 : if (!tx.IsCoinBase())
# 2217 : 61455 : {
# 2218 : 61455 : std::vector<CScriptCheck> vChecks;
# 2219 : 61455 : bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
# 2220 : 61455 : TxValidationState tx_state;
# 2221 [ + + ][ + + ]: 61455 : if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
# [ + + ]
# 2222 : : // Any transaction validation failure in ConnectBlock is a block consensus failure
# 2223 : 3932 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
# 2224 : 3932 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
# 2225 : 3932 : return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
# 2226 : 3932 : tx.GetHash().ToString(), state.ToString());
# 2227 : 3932 : }
# 2228 : 57523 : control.Add(vChecks);
# 2229 : 57523 : }
# 2230 : :
# 2231 : 151502 : CTxUndo undoDummy;
# 2232 [ + + ]: 151502 : if (i > 0) {
# 2233 : 57523 : blockundo.vtxundo.push_back(CTxUndo());
# 2234 : 57523 : }
# 2235 [ + + ]: 151502 : UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
# 2236 : 151502 : }
# 2237 : 90001 : int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
# 2238 [ + - ][ + + ]: 90001 : LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
# 2239 : :
# 2240 : 90001 : CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, m_params.GetConsensus());
# 2241 [ + + ]: 90001 : if (block.vtx[0]->GetValueOut() > blockReward) {
# 2242 : 5 : LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
# 2243 : 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
# 2244 : 5 : }
# 2245 : :
# 2246 [ + + ]: 89996 : if (!control.Wait()) {
# 2247 : 17 : LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
# 2248 : 17 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
# 2249 : 17 : }
# 2250 : 89979 : int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
# 2251 [ + - ][ + + ]: 89979 : LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
# 2252 : :
# 2253 [ + + ]: 89979 : if (fJustCheck)
# 2254 : 28236 : return true;
# 2255 : :
# 2256 [ - + ]: 61743 : if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
# 2257 : 0 : return false;
# 2258 : 0 : }
# 2259 : :
# 2260 : 61743 : int64_t nTime5 = GetTimeMicros(); nTimeUndo += nTime5 - nTime4;
# 2261 [ + - ]: 61743 : LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeUndo * MICRO, nTimeUndo * MILLI / nBlocksTotal);
# 2262 : :
# 2263 [ + + ]: 61743 : if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
# 2264 : 60074 : pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
# 2265 : 60074 : m_blockman.m_dirty_blockindex.insert(pindex);
# 2266 : 60074 : }
# 2267 : :
# 2268 : 61743 : assert(pindex->phashBlock);
# 2269 : : // add this block to the view's block chain
# 2270 : 0 : view.SetBestBlock(pindex->GetBlockHash());
# 2271 : :
# 2272 : 61743 : int64_t nTime6 = GetTimeMicros(); nTimeIndex += nTime6 - nTime5;
# 2273 [ + - ]: 61743 : LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
# 2274 : :
# 2275 : 61743 : TRACE6(validation, block_connected,
# 2276 : 61743 : block_hash.data(),
# 2277 : 61743 : pindex->nHeight,
# 2278 : 61743 : block.vtx.size(),
# 2279 : 61743 : nInputs,
# 2280 : 61743 : nSigOpsCost,
# 2281 : 61743 : nTime5 - nTimeStart // in microseconds (µs)
# 2282 : 61743 : );
# 2283 : :
# 2284 : 61743 : return true;
# 2285 : 61743 : }
# 2286 : :
# 2287 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
# 2288 : 225803 : {
# 2289 : 225803 : AssertLockHeld(::cs_main);
# 2290 : 225803 : return this->GetCoinsCacheSizeState(
# 2291 : 225803 : m_coinstip_cache_size_bytes,
# 2292 : 225803 : gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
# 2293 : 225803 : }
# 2294 : :
# 2295 : : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
# 2296 : : size_t max_coins_cache_size_bytes,
# 2297 : : size_t max_mempool_size_bytes)
# 2298 : 225805 : {
# 2299 : 225805 : AssertLockHeld(::cs_main);
# 2300 [ + + ]: 225805 : const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
# 2301 : 225805 : int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
# 2302 : 225805 : int64_t nTotalSpace =
# 2303 : 225805 : max_coins_cache_size_bytes + std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
# 2304 : :
# 2305 : : //! No need to periodic flush if at least this much space still available.
# 2306 : 225805 : static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
# 2307 : 225805 : int64_t large_threshold =
# 2308 : 225805 : std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
# 2309 : :
# 2310 [ + + ]: 225805 : if (cacheSize > nTotalSpace) {
# 2311 : 1 : LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
# 2312 : 1 : return CoinsCacheSizeState::CRITICAL;
# 2313 [ - + ]: 225804 : } else if (cacheSize > large_threshold) {
# 2314 : 0 : return CoinsCacheSizeState::LARGE;
# 2315 : 0 : }
# 2316 : 225804 : return CoinsCacheSizeState::OK;
# 2317 : 225805 : }
# 2318 : :
# 2319 : : bool CChainState::FlushStateToDisk(
# 2320 : : BlockValidationState &state,
# 2321 : : FlushStateMode mode,
# 2322 : : int nManualPruneHeight)
# 2323 : 225803 : {
# 2324 : 225803 : LOCK(cs_main);
# 2325 : 225803 : assert(this->CanFlushToDisk());
# 2326 : 0 : static std::chrono::microseconds nLastWrite{0};
# 2327 : 225803 : static std::chrono::microseconds nLastFlush{0};
# 2328 : 225803 : std::set<int> setFilesToPrune;
# 2329 : 225803 : bool full_flush_completed = false;
# 2330 : :
# 2331 : 225803 : const size_t coins_count = CoinsTip().GetCacheSize();
# 2332 : 225803 : const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
# 2333 : :
# 2334 : 225803 : try {
# 2335 : 225803 : {
# 2336 : 225803 : bool fFlushForPrune = false;
# 2337 : 225803 : bool fDoFullFlush = false;
# 2338 : :
# 2339 : 225803 : CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
# 2340 : 225803 : LOCK(m_blockman.cs_LastBlockFile);
# 2341 [ + + ][ + + ]: 225803 : if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
# [ + + ][ + - ]
# 2342 : : // make sure we don't prune above the blockfilterindexes bestblocks
# 2343 : : // pruning is height-based
# 2344 : 59 : int last_prune = m_chain.Height(); // last height we can prune
# 2345 : 59 : ForEachBlockFilterIndex([&](BlockFilterIndex& index) {
# 2346 : 16 : last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
# 2347 : 16 : });
# 2348 : :
# 2349 [ + + ]: 59 : if (nManualPruneHeight > 0) {
# 2350 : 3 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
# 2351 : :
# 2352 : 3 : m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
# 2353 : 56 : } else {
# 2354 : 56 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
# 2355 : :
# 2356 : 56 : m_blockman.FindFilesToPrune(setFilesToPrune, m_params.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
# 2357 : 56 : m_blockman.m_check_for_pruning = false;
# 2358 : 56 : }
# 2359 [ + + ]: 59 : if (!setFilesToPrune.empty()) {
# 2360 : 3 : fFlushForPrune = true;
# 2361 [ + + ]: 3 : if (!fHavePruned) {
# 2362 : 1 : m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
# 2363 : 1 : fHavePruned = true;
# 2364 : 1 : }
# 2365 : 3 : }
# 2366 : 59 : }
# 2367 : 225803 : const auto nNow = GetTime<std::chrono::microseconds>();
# 2368 : : // Avoid writing/flushing immediately after startup.
# 2369 [ + + ]: 225803 : if (nLastWrite.count() == 0) {
# 2370 : 735 : nLastWrite = nNow;
# 2371 : 735 : }
# 2372 [ + + ]: 225803 : if (nLastFlush.count() == 0) {
# 2373 : 735 : nLastFlush = nNow;
# 2374 : 735 : }
# 2375 : : // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
# 2376 [ + + ][ - + ]: 225803 : bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
# 2377 : : // The cache is over the limit, we have to write now.
# 2378 [ + + ][ - + ]: 225803 : bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
# 2379 : : // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
# 2380 [ + + ][ + + ]: 225803 : bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
# 2381 : : // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
# 2382 [ + + ][ + + ]: 225803 : bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
# 2383 : : // Combine all conditions that result in a full cache flush.
# 2384 [ + + ][ - + ]: 225803 : fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
# [ - + ][ + + ]
# [ + + ]
# 2385 : : // Write blocks and block index to disk.
# 2386 [ + + ][ + + ]: 225803 : if (fDoFullFlush || fPeriodicWrite) {
# 2387 : : // Ensure we can write block index
# 2388 [ - + ]: 1623 : if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
# 2389 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2390 : 0 : }
# 2391 : 1623 : {
# 2392 : 1623 : LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
# 2393 : :
# 2394 : : // First make sure all block and undo data is flushed to disk.
# 2395 : 1623 : m_blockman.FlushBlockFile();
# 2396 : 1623 : }
# 2397 : :
# 2398 : : // Then update all block file information (which may refer to block and undo files).
# 2399 : 1623 : {
# 2400 : 1623 : LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
# 2401 : :
# 2402 [ - + ]: 1623 : if (!m_blockman.WriteBlockIndexDB()) {
# 2403 : 0 : return AbortNode(state, "Failed to write to block index database");
# 2404 : 0 : }
# 2405 : 1623 : }
# 2406 : : // Finally remove any pruned files
# 2407 [ + + ]: 1623 : if (fFlushForPrune) {
# 2408 : 3 : LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
# 2409 : :
# 2410 : 3 : UnlinkPrunedFiles(setFilesToPrune);
# 2411 : 3 : }
# 2412 : 1623 : nLastWrite = nNow;
# 2413 : 1623 : }
# 2414 : : // Flush best chain related state. This can only be done if the blocks / block index write was also done.
# 2415 [ + + ][ + + ]: 225803 : if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
# [ + - ]
# 2416 : 1613 : LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
# 2417 : 1613 : coins_count, coins_mem_usage / 1000), BCLog::BENCH);
# 2418 : :
# 2419 : : // Typical Coin structures on disk are around 48 bytes in size.
# 2420 : : // Pushing a new one to the database can cause it to be written
# 2421 : : // twice (once in the log, and once in the tables). This is already
# 2422 : : // an overestimation, as most will delete an existing entry or
# 2423 : : // overwrite one. Still, use a conservative safety factor of 2.
# 2424 [ - + ]: 1613 : if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
# 2425 : 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
# 2426 : 0 : }
# 2427 : : // Flush the chainstate (which may refer to block index entries).
# 2428 [ - + ]: 1613 : if (!CoinsTip().Flush())
# 2429 : 0 : return AbortNode(state, "Failed to write to coin database");
# 2430 : 1613 : nLastFlush = nNow;
# 2431 : 1613 : full_flush_completed = true;
# 2432 : 1613 : TRACE5(utxocache, flush,
# 2433 : 1613 : (int64_t)(GetTimeMicros() - nNow.count()), // in microseconds (µs)
# 2434 : 1613 : (u_int32_t)mode,
# 2435 : 1613 : (u_int64_t)coins_count,
# 2436 : 1613 : (u_int64_t)coins_mem_usage,
# 2437 : 1613 : (bool)fFlushForPrune);
# 2438 : 1613 : }
# 2439 : 225803 : }
# 2440 [ + + ]: 225803 : if (full_flush_completed) {
# 2441 : : // Update best block in wallet (so we can detect restored wallets).
# 2442 : 1613 : GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
# 2443 : 1613 : }
# 2444 : 225803 : } catch (const std::runtime_error& e) {
# 2445 : 0 : return AbortNode(state, std::string("System error while flushing: ") + e.what());
# 2446 : 0 : }
# 2447 : 225803 : return true;
# 2448 : 225803 : }
# 2449 : :
# 2450 : : void CChainState::ForceFlushStateToDisk()
# 2451 : 1571 : {
# 2452 : 1571 : BlockValidationState state;
# 2453 [ - + ]: 1571 : if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
# 2454 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2455 : 0 : }
# 2456 : 1571 : }
# 2457 : :
# 2458 : : void CChainState::PruneAndFlush()
# 2459 : 9 : {
# 2460 : 9 : BlockValidationState state;
# 2461 : 9 : m_blockman.m_check_for_pruning = true;
# 2462 [ - + ]: 9 : if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
# 2463 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 2464 : 0 : }
# 2465 : 9 : }
# 2466 : :
# 2467 : : static void DoWarning(const bilingual_str& warning)
# 2468 : 4 : {
# 2469 : 4 : static bool fWarned = false;
# 2470 : 4 : SetMiscWarning(warning);
# 2471 [ + + ]: 4 : if (!fWarned) {
# 2472 : 2 : AlertNotify(warning.original);
# 2473 : 2 : fWarned = true;
# 2474 : 2 : }
# 2475 : 4 : }
# 2476 : :
# 2477 : : /** Private helper function that concatenates warning messages. */
# 2478 : : static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
# 2479 : 144 : {
# 2480 [ - + ]: 144 : if (!res.empty()) res += Untranslated(", ");
# 2481 : 144 : res += warn;
# 2482 : 144 : }
# 2483 : :
# 2484 : : static void UpdateTipLog(
# 2485 : : const CCoinsViewCache& coins_tip,
# 2486 : : const CBlockIndex* tip,
# 2487 : : const CChainParams& params,
# 2488 : : const std::string& func_name,
# 2489 : : const std::string& prefix,
# 2490 : : const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 2491 : 65796 : {
# 2492 : :
# 2493 : 65796 : AssertLockHeld(::cs_main);
# 2494 [ + + ]: 65796 : LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
# 2495 : 65796 : prefix, func_name,
# 2496 : 65796 : tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
# 2497 : 65796 : log(tip->nChainWork.getdouble()) / log(2.0), (unsigned long)tip->nChainTx,
# 2498 : 65796 : FormatISO8601DateTime(tip->GetBlockTime()),
# 2499 : 65796 : GuessVerificationProgress(params.TxData(), tip),
# 2500 : 65796 : coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
# 2501 : 65796 : coins_tip.GetCacheSize(),
# 2502 : 65796 : !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
# 2503 : 65796 : }
# 2504 : :
# 2505 : : void CChainState::UpdateTip(const CBlockIndex* pindexNew)
# 2506 : 65797 : {
# 2507 : 65797 : AssertLockHeld(::cs_main);
# 2508 : 65797 : const auto& coins_tip = this->CoinsTip();
# 2509 : :
# 2510 : : // The remainder of the function isn't relevant if we are not acting on
# 2511 : : // the active chainstate, so return if need be.
# 2512 [ + + ]: 65797 : if (this != &m_chainman.ActiveChainstate()) {
# 2513 : : // Only log every so often so that we don't bury log messages at the tip.
# 2514 : 1 : constexpr int BACKGROUND_LOG_INTERVAL = 2000;
# 2515 [ - + ]: 1 : if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
# 2516 : 0 : UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "[background validation] ", "");
# 2517 : 0 : }
# 2518 : 1 : return;
# 2519 : 1 : }
# 2520 : :
# 2521 : : // New best block
# 2522 [ + + ]: 65796 : if (m_mempool) {
# 2523 : 65695 : m_mempool->AddTransactionsUpdated(1);
# 2524 : 65695 : }
# 2525 : :
# 2526 : 65796 : {
# 2527 : 65796 : LOCK(g_best_block_mutex);
# 2528 : 65796 : g_best_block = pindexNew->GetBlockHash();
# 2529 : 65796 : g_best_block_cv.notify_all();
# 2530 : 65796 : }
# 2531 : :
# 2532 : 65796 : bilingual_str warning_messages;
# 2533 [ + + ]: 65796 : if (!this->IsInitialBlockDownload()) {
# 2534 : 59319 : const CBlockIndex* pindex = pindexNew;
# 2535 [ + + ]: 1779570 : for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
# 2536 : 1720251 : WarningBitsConditionChecker checker(bit);
# 2537 : 1720251 : ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache[bit]);
# 2538 [ + + ][ + + ]: 1720251 : if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
# 2539 : 148 : const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
# 2540 [ + + ]: 148 : if (state == ThresholdState::ACTIVE) {
# 2541 : 4 : DoWarning(warning);
# 2542 : 144 : } else {
# 2543 : 144 : AppendWarning(warning_messages, warning);
# 2544 : 144 : }
# 2545 : 148 : }
# 2546 : 1720251 : }
# 2547 : 59319 : }
# 2548 : 65796 : UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "", warning_messages.original);
# 2549 : 65796 : }
# 2550 : :
# 2551 : : /** Disconnect m_chain's tip.
# 2552 : : * After calling, the mempool will be in an inconsistent state, with
# 2553 : : * transactions from disconnected blocks being added to disconnectpool. You
# 2554 : : * should make the mempool consistent again by calling MaybeUpdateMempoolForReorg.
# 2555 : : * with cs_main held.
# 2556 : : *
# 2557 : : * If disconnectpool is nullptr, then no disconnected transactions are added to
# 2558 : : * disconnectpool (note that the caller is responsible for mempool consistency
# 2559 : : * in any case).
# 2560 : : */
# 2561 : : bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool)
# 2562 : 3874 : {
# 2563 : 3874 : AssertLockHeld(cs_main);
# 2564 [ + - ]: 3874 : if (m_mempool) AssertLockHeld(m_mempool->cs);
# 2565 : :
# 2566 : 3874 : CBlockIndex *pindexDelete = m_chain.Tip();
# 2567 : 3874 : assert(pindexDelete);
# 2568 : : // Read block from disk.
# 2569 : 0 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
# 2570 : 3874 : CBlock& block = *pblock;
# 2571 [ - + ]: 3874 : if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) {
# 2572 : 0 : return error("DisconnectTip(): Failed to read block");
# 2573 : 0 : }
# 2574 : : // Apply the block atomically to the chain state.
# 2575 : 3874 : int64_t nStart = GetTimeMicros();
# 2576 : 3874 : {
# 2577 : 3874 : CCoinsViewCache view(&CoinsTip());
# 2578 : 3874 : assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
# 2579 [ + + ]: 3874 : if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
# 2580 : 1 : return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
# 2581 : 3873 : bool flushed = view.Flush();
# 2582 : 3873 : assert(flushed);
# 2583 : 3873 : }
# 2584 [ + - ]: 3873 : LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
# 2585 : : // Write the chain state to disk, if necessary.
# 2586 [ - + ]: 3873 : if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
# 2587 : 0 : return false;
# 2588 : 0 : }
# 2589 : :
# 2590 [ + - ][ + - ]: 3873 : if (disconnectpool && m_mempool) {
# 2591 : : // Save transactions to re-add to mempool at end of reorg
# 2592 [ + + ]: 13937 : for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
# 2593 : 10064 : disconnectpool->addTransaction(*it);
# 2594 : 10064 : }
# 2595 [ + + ]: 7078 : while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
# 2596 : : // Drop the earliest entry, and remove its children from the mempool.
# 2597 : 3205 : auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
# 2598 : 3205 : m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
# 2599 : 3205 : disconnectpool->removeEntry(it);
# 2600 : 3205 : }
# 2601 : 3873 : }
# 2602 : :
# 2603 : 3873 : m_chain.SetTip(pindexDelete->pprev);
# 2604 : :
# 2605 : 3873 : UpdateTip(pindexDelete->pprev);
# 2606 : : // Let wallets know transactions went from 1-confirmed to
# 2607 : : // 0-confirmed or conflicted:
# 2608 : 3873 : GetMainSignals().BlockDisconnected(pblock, pindexDelete);
# 2609 : 3873 : return true;
# 2610 : 3873 : }
# 2611 : :
# 2612 : : static int64_t nTimeReadFromDiskTotal = 0;
# 2613 : : static int64_t nTimeConnectTotal = 0;
# 2614 : : static int64_t nTimeFlush = 0;
# 2615 : : static int64_t nTimeChainState = 0;
# 2616 : : static int64_t nTimePostConnect = 0;
# 2617 : :
# 2618 : : struct PerBlockConnectTrace {
# 2619 : : CBlockIndex* pindex = nullptr;
# 2620 : : std::shared_ptr<const CBlock> pblock;
# 2621 : 144003 : PerBlockConnectTrace() {}
# 2622 : : };
# 2623 : : /**
# 2624 : : * Used to track blocks whose transactions were applied to the UTXO state as a
# 2625 : : * part of a single ActivateBestChainStep call.
# 2626 : : *
# 2627 : : * This class is single-use, once you call GetBlocksConnected() you have to throw
# 2628 : : * it away and make a new one.
# 2629 : : */
# 2630 : : class ConnectTrace {
# 2631 : : private:
# 2632 : : std::vector<PerBlockConnectTrace> blocksConnected;
# 2633 : :
# 2634 : : public:
# 2635 : 82079 : explicit ConnectTrace() : blocksConnected(1) {}
# 2636 : :
# 2637 : 61924 : void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
# 2638 : 61924 : assert(!blocksConnected.back().pindex);
# 2639 : 0 : assert(pindex);
# 2640 : 0 : assert(pblock);
# 2641 : 0 : blocksConnected.back().pindex = pindex;
# 2642 : 61924 : blocksConnected.back().pblock = std::move(pblock);
# 2643 : 61924 : blocksConnected.emplace_back();
# 2644 : 61924 : }
# 2645 : :
# 2646 : 63011 : std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
# 2647 : : // We always keep one extra block at the end of our list because
# 2648 : : // blocks are added after all the conflicted transactions have
# 2649 : : // been filled in. Thus, the last entry should always be an empty
# 2650 : : // one waiting for the transactions from the next block. We pop
# 2651 : : // the last entry here to make sure the list we return is sane.
# 2652 : 63011 : assert(!blocksConnected.back().pindex);
# 2653 : 0 : blocksConnected.pop_back();
# 2654 : 63011 : return blocksConnected;
# 2655 : 63011 : }
# 2656 : : };
# 2657 : :
# 2658 : : /**
# 2659 : : * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
# 2660 : : * corresponding to pindexNew, to bypass loading it again from disk.
# 2661 : : *
# 2662 : : * The block is added to connectTrace if connection succeeds.
# 2663 : : */
# 2664 : : bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
# 2665 : 65917 : {
# 2666 : 65917 : AssertLockHeld(cs_main);
# 2667 [ + + ]: 65917 : if (m_mempool) AssertLockHeld(m_mempool->cs);
# 2668 : :
# 2669 : 65917 : assert(pindexNew->pprev == m_chain.Tip());
# 2670 : : // Read block from disk.
# 2671 : 0 : int64_t nTime1 = GetTimeMicros();
# 2672 : 65917 : std::shared_ptr<const CBlock> pthisBlock;
# 2673 [ + + ]: 65917 : if (!pblock) {
# 2674 : 6063 : std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
# 2675 [ - + ]: 6063 : if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_params.GetConsensus())) {
# 2676 : 0 : return AbortNode(state, "Failed to read block");
# 2677 : 0 : }
# 2678 : 6063 : pthisBlock = pblockNew;
# 2679 : 59854 : } else {
# 2680 [ + - ]: 59854 : LogPrint(BCLog::BENCH, " - Using cached block\n");
# 2681 : 59854 : pthisBlock = pblock;
# 2682 : 59854 : }
# 2683 : 65917 : const CBlock& blockConnecting = *pthisBlock;
# 2684 : : // Apply the block atomically to the chain state.
# 2685 : 65917 : int64_t nTime2 = GetTimeMicros(); nTimeReadFromDiskTotal += nTime2 - nTime1;
# 2686 : 65917 : int64_t nTime3;
# 2687 [ + - ]: 65917 : LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDiskTotal * MICRO, nTimeReadFromDiskTotal * MILLI / nBlocksTotal);
# 2688 : 65917 : {
# 2689 : 65917 : CCoinsViewCache view(&CoinsTip());
# 2690 : 65917 : bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
# 2691 : 65917 : GetMainSignals().BlockChecked(blockConnecting, state);
# 2692 [ + + ]: 65917 : if (!rv) {
# 2693 [ + - ]: 3993 : if (state.IsInvalid())
# 2694 : 3993 : InvalidBlockFound(pindexNew, state);
# 2695 : 3993 : return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
# 2696 : 3993 : }
# 2697 : 61924 : nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
# 2698 : 61924 : assert(nBlocksTotal > 0);
# 2699 [ + - ]: 61924 : LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
# 2700 : 61924 : bool flushed = view.Flush();
# 2701 : 61924 : assert(flushed);
# 2702 : 61924 : }
# 2703 : 0 : int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
# 2704 [ + - ]: 61924 : LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
# 2705 : : // Write the chain state to disk, if necessary.
# 2706 [ - + ]: 61924 : if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
# 2707 : 0 : return false;
# 2708 : 0 : }
# 2709 : 61924 : int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
# 2710 [ + - ]: 61924 : LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
# 2711 : : // Remove conflicting transactions from the mempool.;
# 2712 [ + + ]: 61924 : if (m_mempool) {
# 2713 : 61823 : m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
# 2714 : 61823 : disconnectpool.removeForBlock(blockConnecting.vtx);
# 2715 : 61823 : }
# 2716 : : // Update m_chain & related variables.
# 2717 : 61924 : m_chain.SetTip(pindexNew);
# 2718 : 61924 : UpdateTip(pindexNew);
# 2719 : :
# 2720 : 61924 : int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
# 2721 [ + - ]: 61924 : LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
# 2722 [ + - ]: 61924 : LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
# 2723 : :
# 2724 : 61924 : connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
# 2725 : 61924 : return true;
# 2726 : 61924 : }
# 2727 : :
# 2728 : : /**
# 2729 : : * Return the tip of the chain with the most work in it, that isn't
# 2730 : : * known to be invalid (it's however far from certain to be valid).
# 2731 : : */
# 2732 : : CBlockIndex* CChainState::FindMostWorkChain()
# 2733 : 79704 : {
# 2734 : 79704 : AssertLockHeld(::cs_main);
# 2735 : 79708 : do {
# 2736 : 79708 : CBlockIndex *pindexNew = nullptr;
# 2737 : :
# 2738 : : // Find the best candidate header.
# 2739 : 79708 : {
# 2740 : 79708 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
# 2741 [ - + ]: 79708 : if (it == setBlockIndexCandidates.rend())
# 2742 : 0 : return nullptr;
# 2743 : 79708 : pindexNew = *it;
# 2744 : 79708 : }
# 2745 : :
# 2746 : : // Check whether all blocks on the path between the currently active chain and the candidate are valid.
# 2747 : : // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
# 2748 : 0 : CBlockIndex *pindexTest = pindexNew;
# 2749 : 79708 : bool fInvalidAncestor = false;
# 2750 [ + + ][ + + ]: 145636 : while (pindexTest && !m_chain.Contains(pindexTest)) {
# 2751 : 65932 : assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
# 2752 : :
# 2753 : : // Pruned nodes may have entries in setBlockIndexCandidates for
# 2754 : : // which block files have been deleted. Remove those as candidates
# 2755 : : // for the most work chain if we come across them; we can't switch
# 2756 : : // to a chain unless we have all the non-active-chain parent blocks.
# 2757 : 0 : bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
# 2758 : 65932 : bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
# 2759 [ + + ][ - + ]: 65932 : if (fFailedChain || fMissingData) {
# 2760 : : // Candidate chain is not usable (either invalid or missing data)
# 2761 [ + - ][ - + ]: 4 : if (fFailedChain && (m_chainman.m_best_invalid == nullptr || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork)) {
# [ - + ]
# 2762 : 0 : m_chainman.m_best_invalid = pindexNew;
# 2763 : 0 : }
# 2764 : 4 : CBlockIndex *pindexFailed = pindexNew;
# 2765 : : // Remove the entire chain from the set.
# 2766 [ + + ]: 8 : while (pindexTest != pindexFailed) {
# 2767 [ + - ]: 4 : if (fFailedChain) {
# 2768 : 4 : pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
# 2769 [ # # ]: 4 : } else if (fMissingData) {
# 2770 : : // If we're missing data, then add back to m_blocks_unlinked,
# 2771 : : // so that if the block arrives in the future we can try adding
# 2772 : : // to setBlockIndexCandidates again.
# 2773 : 0 : m_blockman.m_blocks_unlinked.insert(
# 2774 : 0 : std::make_pair(pindexFailed->pprev, pindexFailed));
# 2775 : 0 : }
# 2776 : 4 : setBlockIndexCandidates.erase(pindexFailed);
# 2777 : 4 : pindexFailed = pindexFailed->pprev;
# 2778 : 4 : }
# 2779 : 4 : setBlockIndexCandidates.erase(pindexTest);
# 2780 : 4 : fInvalidAncestor = true;
# 2781 : 4 : break;
# 2782 : 4 : }
# 2783 : 65928 : pindexTest = pindexTest->pprev;
# 2784 : 65928 : }
# 2785 [ + + ]: 79708 : if (!fInvalidAncestor)
# 2786 : 79704 : return pindexNew;
# 2787 : 79708 : } while(true);
# 2788 : 79704 : }
# 2789 : :
# 2790 : : /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
# 2791 : 62397 : void CChainState::PruneBlockIndexCandidates() {
# 2792 : : // Note that we can't delete the current block itself, as we may need to return to it later in case a
# 2793 : : // reorganization to a better block fails.
# 2794 : 62397 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
# 2795 [ + - ][ + + ]: 197990 : while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
# [ + + ]
# 2796 : 135593 : setBlockIndexCandidates.erase(it++);
# 2797 : 135593 : }
# 2798 : : // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
# 2799 : 62397 : assert(!setBlockIndexCandidates.empty());
# 2800 : 62397 : }
# 2801 : :
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
 *
 * Requires cs_main (and the mempool lock, when a mempool exists) to be held.
 * Connects at most 32 blocks per call so the caller can periodically release
 * cs_main between invocations.
 *
 * @param[out] state         filled with failure details on error
 * @param[in]  pindexMostWork the target tip with the most accumulated work
 * @param[in]  pblock        optional in-memory block for pindexMostWork itself
 * @param[out] fInvalidFound set to true if a block on the path was invalid
 * @param[in,out] connectTrace records blocks connected for later notification
 * @returns true unless a system error occurred
 */
bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);
    if (m_mempool) AssertLockHeld(m_mempool->cs);

    const CBlockIndex* pindexOldTip = m_chain.Tip();
    const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
        if (!DisconnectTip(state, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            MaybeUpdateMempoolForReorg(disconnectpool, false);

            // If we're unable to disconnect a block during normal operation,
            // then that is a failure of our local system -- we should abort
            // rather than stay on a less work chain.
            AbortNode(state, "Failed to disconnect block; see debug.log for details");
            return false;
        }
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect (in descending height order).
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        // Process in batches of at most 32 blocks per pass.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        // Walk back from the batch's target ancestor down to the fork point,
        // collecting blocks tip-first (hence the reverse iteration below).
        CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
            // Only pass pblock through when it is the block being connected.
            if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
                        InvalidChainFound(vpindexToConnect.front());
                    }
                    // Reset the state so the caller can try another chain.
                    state = BlockValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    MaybeUpdateMempoolForReorg(disconnectpool, false);
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        MaybeUpdateMempoolForReorg(disconnectpool, true);
    }
    if (m_mempool) m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);

    CheckForkWarningConditions();

    return true;
}
# 2892 : :
# 2893 : : static SynchronizationState GetSynchronizationState(bool init)
# 2894 : 111757 : {
# 2895 [ + + ]: 111757 : if (!init) return SynchronizationState::POST_INIT;
# 2896 [ + + ]: 8376 : if (::fReindex) return SynchronizationState::INIT_REINDEX;
# 2897 : 7027 : return SynchronizationState::INIT_DOWNLOAD;
# 2898 : 8376 : }
# 2899 : :
# 2900 : 101430 : static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) {
# 2901 : 101430 : bool fNotify = false;
# 2902 : 101430 : bool fInitialBlockDownload = false;
# 2903 : 101430 : static CBlockIndex* pindexHeaderOld = nullptr;
# 2904 : 101430 : CBlockIndex* pindexHeader = nullptr;
# 2905 : 101430 : {
# 2906 : 101430 : LOCK(cs_main);
# 2907 : 101430 : pindexHeader = pindexBestHeader;
# 2908 : :
# 2909 [ + + ]: 101430 : if (pindexHeader != pindexHeaderOld) {
# 2910 : 52662 : fNotify = true;
# 2911 : 52662 : fInitialBlockDownload = chainstate.IsInitialBlockDownload();
# 2912 : 52662 : pindexHeaderOld = pindexHeader;
# 2913 : 52662 : }
# 2914 : 101430 : }
# 2915 : : // Send block tip changed notifications without cs_main
# 2916 [ + + ]: 101430 : if (fNotify) {
# 2917 : 52662 : uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
# 2918 : 52662 : }
# 2919 : 101430 : return fNotify;
# 2920 : 101430 : }
# 2921 : :
# 2922 : 83109 : static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
# 2923 : 83109 : AssertLockNotHeld(cs_main);
# 2924 : :
# 2925 [ + + ]: 83109 : if (GetMainSignals().CallbacksPending() > 10) {
# 2926 : 130 : SyncWithValidationInterfaceQueue();
# 2927 : 130 : }
# 2928 : 83109 : }
# 2929 : :
/**
 * Make the chain with the most work the active chain, in batches.
 * Repeatedly invokes ActivateBestChainStep, releasing cs_main between
 * iterations so that other threads — and the validation callback queue —
 * can make progress during long reorgs or reindexes.
 *
 * @param[out] state  filled with failure details on error
 * @param[in]  pblock optional in-memory block that may correspond to the new
 *                    tip (avoids re-reading it from disk); may be nullptr
 * @return false only on a system error; encountering invalid blocks does not
 *         by itself cause a false return
 */
bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
{
    AssertLockNotHeld(m_chainstate_mutex);

    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(::cs_main);

    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not lock up other threads for too long
    // during large connects - and to allow for e.g. the callback queue to drain
    // we use m_chainstate_mutex to enforce mutual exclusion so that only one caller may execute this function at a time
    LOCK(m_chainstate_mutex);

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    // Honor -stopatheight: shut down once the tip reaches this height.
    int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.
        LimitValidationInterfaceQueue();

        {
            LOCK(cs_main);
            // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
            LOCK(MempoolMutex());
            CBlockIndex* starting_tip = m_chain.Tip();
            bool blocks_connected = false;
            do {
                // We absolutely may not unlock cs_main until we've made forward progress
                // (with the exception of shutdown due to hardware issues, low disk space, etc).
                ConnectTrace connectTrace; // Destructed before cs_main is unlocked

                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain();
                }

                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
                    break;
                }

                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only forward pblock when its hash matches the target tip.
                if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
                    // A system error occurred
                    return false;
                }
                blocks_connected = true;

                if (fInvalidFound) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }
                pindexNewTip = m_chain.Tip();

                for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
                }
            } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
            if (!blocks_connected) return true;

            const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
            bool fInitialDownload = IsInitialBlockDownload();

            // Notify external listeners about the new tip.
            // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
            if (pindexFork != pindexNewTip) {
                // Notify ValidationInterface subscribers
                GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

                // Always notify the UI if a new block tip was connected
                uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();

        // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
        // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
        // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested()) break;
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex();

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FlushStateMode::PERIODIC)) {
        return false;
    }

    return true;
}
# 3031 : :
/**
 * Treat pindex as preferred among tips of equal work: re-insert it into
 * setBlockIndexCandidates with a decreasing (negative) nSequenceId and
 * re-run ActivateBestChain. NOTE(review): presumably the candidate
 * comparator breaks work ties using nSequenceId so the lower id wins —
 * confirm against CBlockIndexWorkComparator.
 */
bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
{
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);
    {
        LOCK(cs_main);
        if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = m_chain.Tip()->nChainWork;
        // Remove and re-add pindex so the candidate set re-sorts it under
        // its new (more preferred) sequence id.
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        // Only blocks with full transaction data can be tip candidates.
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state, std::shared_ptr<const CBlock>());
}
# 3062 : :
/**
 * Mark pindex as invalid, disconnect it and all of its descendants from the
 * active chain, and repopulate setBlockIndexCandidates so the best remaining
 * chain can become the new tip.
 *
 * @return false if a disconnect fails or the block remains in the active
 *         chain afterwards; true otherwise (including an early exit on
 *         shutdown request).
 */
bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
{
    AssertLockNotHeld(m_chainstate_mutex);
    AssertLockNotHeld(::cs_main);

    // Genesis block can't be invalidated
    assert(pindex);
    if (pindex->nHeight == 0) return false;

    CBlockIndex* to_mark_failed = pindex;
    bool pindex_was_in_chain = false;
    int disconnected = 0;

    // We do not allow ActivateBestChain() to run while InvalidateBlock() is
    // running, as that could cause the tip to change while we disconnect
    // blocks.
    LOCK(m_chainstate_mutex);

    // We'll be acquiring and releasing cs_main below, to allow the validation
    // callbacks to run. However, we should keep the block index in a
    // consistent state as we disconnect blocks -- in particular we need to
    // add equal-work blocks to setBlockIndexCandidates as we disconnect.
    // To avoid walking the block index repeatedly in search of candidates,
    // build a map once so that we can look up candidate blocks by chain
    // work as we go.
    std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;

    {
        LOCK(cs_main);
        for (auto& entry : m_blockman.m_block_index) {
            CBlockIndex* candidate = &entry.second;
            // We don't need to put anything in our active chain into the
            // multimap, because those candidates will be found and considered
            // as we disconnect.
            // Instead, consider only non-active-chain blocks that have at
            // least as much work as where we expect the new tip to end up.
            if (!m_chain.Contains(candidate) &&
                    !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                    candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                    candidate->HaveTxsDownloaded()) {
                candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
            }
        }
    }

    // Disconnect (descendants of) pindex, and mark them invalid.
    while (true) {
        if (ShutdownRequested()) break;

        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();

        LOCK(cs_main);
        // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
        // called after DisconnectTip without unlocking in between
        LOCK(MempoolMutex());
        if (!m_chain.Contains(pindex)) break;
        pindex_was_in_chain = true;
        CBlockIndex *invalid_walk_tip = m_chain.Tip();

        // ActivateBestChain considers blocks already in m_chain
        // unconditionally valid already, so force disconnect away from it.
        DisconnectedBlockTransactions disconnectpool;
        bool ret = DisconnectTip(state, &disconnectpool);
        // DisconnectTip will add transactions to disconnectpool.
        // Adjust the mempool to be consistent with the new tip, adding
        // transactions back to the mempool if disconnecting was successful,
        // and we're not doing a very deep invalidation (in which case
        // keeping the mempool up to date is probably futile anyway).
        MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
        if (!ret) return false;
        assert(invalid_walk_tip->pprev == m_chain.Tip());

        // We immediately mark the disconnected blocks as invalid.
        // This prevents a case where pruned nodes may fail to invalidateblock
        // and be left unable to start as they have no tip candidates (as there
        // are no blocks that meet the "have data and are not invalid per
        // nStatus" criteria for inclusion in setBlockIndexCandidates).
        invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
        m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
        setBlockIndexCandidates.erase(invalid_walk_tip);
        setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
        if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
            // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
            // need to be BLOCK_FAILED_CHILD instead.
            to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
            m_blockman.m_dirty_blockindex.insert(to_mark_failed);
        }

        // Add any equal or more work headers to setBlockIndexCandidates
        auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
        while (candidate_it != candidate_blocks_by_work.end()) {
            if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
                setBlockIndexCandidates.insert(candidate_it->second);
                candidate_it = candidate_blocks_by_work.erase(candidate_it);
            } else {
                ++candidate_it;
            }
        }

        // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
        // iterations, or, if it's the last one, call InvalidChainFound on it.
        to_mark_failed = invalid_walk_tip;
    }

    CheckBlockIndex();

    {
        LOCK(cs_main);
        if (m_chain.Contains(to_mark_failed)) {
            // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
            return false;
        }

        // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
        to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
        m_blockman.m_dirty_blockindex.insert(to_mark_failed);
        setBlockIndexCandidates.erase(to_mark_failed);
        m_chainman.m_failed_blocks.insert(to_mark_failed);

        // If any new blocks somehow arrived while we were disconnecting
        // (above), then the pre-calculation of what should go into
        // setBlockIndexCandidates may have missed entries. This would
        // technically be an inconsistency in the block index, but if we clean
        // it up here, this should be an essentially unobservable error.
        // Loop back over all block index entries and add any missing entries
        // to setBlockIndexCandidates.
        for (auto& [_, block_index] : m_blockman.m_block_index) {
            if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) {
                setBlockIndexCandidates.insert(&block_index);
            }
        }

        InvalidChainFound(to_mark_failed);
    }

    // Only notify about a new block tip if the active chain was modified.
    if (pindex_was_in_chain) {
        uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
    }
    return true;
}
# 3205 : :
# 3206 : 11 : void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
# 3207 : 11 : AssertLockHeld(cs_main);
# 3208 : :
# 3209 : 11 : int nHeight = pindex->nHeight;
# 3210 : :
# 3211 : : // Remove the invalidity flag from this block and all its descendants.
# 3212 [ + + ]: 1528 : for (auto& [_, block_index] : m_blockman.m_block_index) {
# 3213 [ + + ][ + + ]: 1528 : if (!block_index.IsValid() && block_index.GetAncestor(nHeight) == pindex) {
# 3214 : 233 : block_index.nStatus &= ~BLOCK_FAILED_MASK;
# 3215 : 233 : m_blockman.m_dirty_blockindex.insert(&block_index);
# 3216 [ + - ][ + - ]: 233 : if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) {
# [ + - ][ + - ]
# 3217 : 233 : setBlockIndexCandidates.insert(&block_index);
# 3218 : 233 : }
# 3219 [ + + ]: 233 : if (&block_index == m_chainman.m_best_invalid) {
# 3220 : : // Reset invalid block marker if it was pointing to one of those.
# 3221 : 10 : m_chainman.m_best_invalid = nullptr;
# 3222 : 10 : }
# 3223 : 233 : m_chainman.m_failed_blocks.erase(&block_index);
# 3224 : 233 : }
# 3225 : 1528 : }
# 3226 : :
# 3227 : : // Remove the invalidity flag from all ancestors too.
# 3228 [ + + ]: 1302 : while (pindex != nullptr) {
# 3229 [ + + ]: 1291 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
# 3230 : 1 : pindex->nStatus &= ~BLOCK_FAILED_MASK;
# 3231 : 1 : m_blockman.m_dirty_blockindex.insert(pindex);
# 3232 : 1 : m_chainman.m_failed_blocks.erase(pindex);
# 3233 : 1 : }
# 3234 : 1291 : pindex = pindex->pprev;
# 3235 : 1291 : }
# 3236 : 11 : }
# 3237 : :
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS).
 *  Records the block's on-disk position, raises its validity level, and —
 *  when all ancestors already have their transactions — walks forward through
 *  m_blocks_unlinked to make newly-connectable descendants tip candidates.
 */
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
{
    AssertLockHeld(cs_main);
    pindexNew->nTx = block.vtx.size();
    // nChainTx stays 0 until all ancestors' transactions are known (set below).
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (DeploymentActiveAt(*pindexNew, m_params.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    m_blockman.m_dirty_blockindex.insert(pindexNew);

    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            // Cumulative transaction count is now computable from the parent.
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            pindex->nSequenceId = nBlockSequenceId++;
            if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            // Move children that were waiting on this block out of the
            // unlinked map and onto the processing queue.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                m_blockman.m_blocks_unlinked.erase(it);
            }
        }
    } else {
        // An ancestor is missing transaction data: park this block in
        // m_blocks_unlinked keyed by its parent until the gap is filled.
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }
}
# 3282 : :
# 3283 : : static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
# 3284 : 198856 : {
# 3285 : : // Check proof of work matches claimed amount
# 3286 [ + + ][ + + ]: 198856 : if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
# 3287 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
# 3288 : :
# 3289 : 198855 : return true;
# 3290 : 198856 : }
# 3291 : :
/**
 * Context-free block validity checks (no UTXO set, no chain context):
 * header PoW, signet solution, merkle root (incl. CVE-2012-2459 mutation),
 * serialized-size limits, coinbase placement, per-transaction checks
 * (incl. duplicate inputs, CVE-2018-17144) and the legacy sigop limit.
 * On full success with both PoW and merkle-root checking enabled, the result
 * is cached in block.fChecked so repeat calls return immediately.
 */
bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Signet only: check block solution
    if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
    }

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");

    // Check transactions
    // Must check for duplicate inputs (see CVE-2018-17144)
    for (const auto& tx : block.vtx) {
        TxValidationState tx_state;
        if (!CheckTransaction(*tx, tx_state)) {
            // CheckBlock() does context-free validation checks. The only
            // possible failures are consensus failures.
            assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
        }
    }
    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");

    // Cache the result only when every check was actually performed.
    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
# 3365 : :
# 3366 : : void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
# 3367 : 38999 : {
# 3368 : 38999 : int commitpos = GetWitnessCommitmentIndex(block);
# 3369 : 38999 : static const std::vector<unsigned char> nonce(32, 0x00);
# 3370 [ + + ][ + + ]: 38999 : if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) {
# [ + + ]
# 3371 : 28436 : CMutableTransaction tx(*block.vtx[0]);
# 3372 : 28436 : tx.vin[0].scriptWitness.stack.resize(1);
# 3373 : 28436 : tx.vin[0].scriptWitness.stack[0] = nonce;
# 3374 : 28436 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3375 : 28436 : }
# 3376 : 38999 : }
# 3377 : :
# 3378 : : std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
# 3379 : 34877 : {
# 3380 : 34877 : std::vector<unsigned char> commitment;
# 3381 : 34877 : int commitpos = GetWitnessCommitmentIndex(block);
# 3382 : 34877 : std::vector<unsigned char> ret(32, 0x00);
# 3383 [ + - ]: 34877 : if (commitpos == NO_WITNESS_COMMITMENT) {
# 3384 : 34877 : uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
# 3385 : 34877 : CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
# 3386 : 34877 : CTxOut out;
# 3387 : 34877 : out.nValue = 0;
# 3388 : 34877 : out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
# 3389 : 34877 : out.scriptPubKey[0] = OP_RETURN;
# 3390 : 34877 : out.scriptPubKey[1] = 0x24;
# 3391 : 34877 : out.scriptPubKey[2] = 0xaa;
# 3392 : 34877 : out.scriptPubKey[3] = 0x21;
# 3393 : 34877 : out.scriptPubKey[4] = 0xa9;
# 3394 : 34877 : out.scriptPubKey[5] = 0xed;
# 3395 : 34877 : memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
# 3396 : 34877 : commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
# 3397 : 34877 : CMutableTransaction tx(*block.vtx[0]);
# 3398 : 34877 : tx.vout.push_back(out);
# 3399 : 34877 : block.vtx[0] = MakeTransactionRef(std::move(tx));
# 3400 : 34877 : }
# 3401 : 34877 : UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
# 3402 : 34877 : return commitment;
# 3403 : 34877 : }
# 3404 : :
# 3405 : : /** Context-dependent validity checks.
# 3406 : : * By "context", we mean only the previous block headers, but not the UTXO
# 3407 : : * set; UTXO-related validity checks are done in ConnectBlock().
# 3408 : : * NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3409 : : * should consider upgrade issues if we change which consensus rules are
# 3410 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3411 : : * in ConnectBlock().
# 3412 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3413 : : */
# 3414 : : static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
# 3415 : 95332 : {
# 3416 : 95332 : AssertLockHeld(::cs_main);
# 3417 : 95332 : assert(pindexPrev != nullptr);
# 3418 : 0 : const int nHeight = pindexPrev->nHeight + 1;
# 3419 : :
# 3420 : : // Check proof of work
# 3421 : 95332 : const Consensus::Params& consensusParams = params.GetConsensus();
# 3422 [ + + ]: 95332 : if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
# 3423 : 2 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
# 3424 : :
# 3425 : : // Check against checkpoints
# 3426 [ + + ]: 95330 : if (fCheckpointsEnabled) {
# 3427 : : // Don't accept any forks from the main chain prior to last checkpoint.
# 3428 : : // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
# 3429 : : // BlockIndex().
# 3430 : 95070 : const CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints());
# 3431 [ + + ][ + + ]: 95070 : if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
# 3432 : 1 : LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
# 3433 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
# 3434 : 1 : }
# 3435 : 95070 : }
# 3436 : :
# 3437 : : // Check timestamp against prev
# 3438 [ + + ]: 95329 : if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
# 3439 : 6 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
# 3440 : :
# 3441 : : // Check timestamp
# 3442 [ + + ]: 95323 : if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
# 3443 : 5 : return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
# 3444 : :
# 3445 : : // Reject blocks with outdated version
# 3446 [ + + ][ + + ]: 95318 : if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
# 3447 [ + + ][ + + ]: 95318 : (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_DERSIG)) ||
# 3448 [ + + ][ + + ]: 95318 : (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CLTV))) {
# 3449 : 3 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
# 3450 : 3 : strprintf("rejected nVersion=0x%08x block", block.nVersion));
# 3451 : 3 : }
# 3452 : :
# 3453 : 95315 : return true;
# 3454 : 95318 : }
# 3455 : :
# 3456 : : /** NOTE: This function is not currently invoked by ConnectBlock(), so we
# 3457 : : * should consider upgrade issues if we change which consensus rules are
# 3458 : : * enforced in this function (eg by adding a new consensus rule). See comment
# 3459 : : * in ConnectBlock().
# 3460 : : * Note that -reindex-chainstate skips the validation that happens here!
# 3461 : : */
# 3462 : : static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
# 3463 : 92400 : {
# 3464 [ + + ]: 92400 : const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
# 3465 : :
# 3466 : : // Enforce BIP113 (Median Time Past).
# 3467 : 92400 : int nLockTimeFlags = 0;
# 3468 [ + + ]: 92400 : if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV)) {
# 3469 : 90881 : assert(pindexPrev != nullptr);
# 3470 : 0 : nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
# 3471 : 90881 : }
# 3472 : :
# 3473 [ + + ]: 92400 : int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
# 3474 : 92400 : ? pindexPrev->GetMedianTimePast()
# 3475 : 92400 : : block.GetBlockTime();
# 3476 : :
# 3477 : : // Check that all transactions are finalized
# 3478 [ + + ]: 151683 : for (const auto& tx : block.vtx) {
# 3479 [ + + ]: 151683 : if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
# 3480 : 7 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
# 3481 : 7 : }
# 3482 : 151683 : }
# 3483 : :
# 3484 : : // Enforce rule that the coinbase starts with serialized block height
# 3485 [ + + ]: 92393 : if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB))
# 3486 : 92129 : {
# 3487 : 92129 : CScript expect = CScript() << nHeight;
# 3488 [ + + ]: 92129 : if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
# 3489 [ - + ]: 92129 : !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
# 3490 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
# 3491 : 1 : }
# 3492 : 92129 : }
# 3493 : :
# 3494 : : // Validation for witness commitments.
# 3495 : : // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
# 3496 : : // coinbase (where 0x0000....0000 is used instead).
# 3497 : : // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
# 3498 : : // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
# 3499 : : // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
# 3500 : : // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
# 3501 : : // multiple, the last one is used.
# 3502 : 92392 : bool fHaveWitness = false;
# 3503 [ + + ]: 92392 : if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT)) {
# 3504 : 90028 : int commitpos = GetWitnessCommitmentIndex(block);
# 3505 [ + + ]: 90028 : if (commitpos != NO_WITNESS_COMMITMENT) {
# 3506 : 83588 : bool malleated = false;
# 3507 : 83588 : uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
# 3508 : : // The malleation check is ignored; as the transaction tree itself
# 3509 : : // already does not permit it, it is impossible to trigger in the
# 3510 : : // witness tree.
# 3511 [ + + ][ - + ]: 83588 : if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
# 3512 : 6 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
# 3513 : 6 : }
# 3514 : 83582 : CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
# 3515 [ + + ]: 83582 : if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
# 3516 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
# 3517 : 3 : }
# 3518 : 83579 : fHaveWitness = true;
# 3519 : 83579 : }
# 3520 : 90028 : }
# 3521 : :
# 3522 : : // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
# 3523 [ + + ]: 92383 : if (!fHaveWitness) {
# 3524 [ + + ]: 31751 : for (const auto& tx : block.vtx) {
# 3525 [ + + ]: 31751 : if (tx->HasWitness()) {
# 3526 : 5 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
# 3527 : 5 : }
# 3528 : 31751 : }
# 3529 : 8804 : }
# 3530 : :
# 3531 : : // After the coinbase witness reserved value and commitment are verified,
# 3532 : : // we can check if the block weight passes (before we've checked the
# 3533 : : // coinbase witness, it would be possible for the weight to be too
# 3534 : : // large by filling up the coinbase witness, which doesn't change
# 3535 : : // the block hash, so we couldn't mark the block as permanently
# 3536 : : // failed).
# 3537 [ + + ]: 92378 : if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
# 3538 : 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
# 3539 : 1 : }
# 3540 : :
# 3541 : 92377 : return true;
# 3542 : 92378 : }
# 3543 : :
# 3544 : : bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
# 3545 : 122561 : {
# 3546 : 122561 : AssertLockHeld(cs_main);
# 3547 : : // Check for duplicate
# 3548 : 122561 : uint256 hash = block.GetHash();
# 3549 : 122561 : BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
# 3550 [ + + ]: 122561 : if (hash != chainparams.GetConsensus().hashGenesisBlock) {
# 3551 [ + + ]: 122549 : if (miSelf != m_blockman.m_block_index.end()) {
# 3552 : : // Block header is already known.
# 3553 : 55463 : CBlockIndex* pindex = &(miSelf->second);
# 3554 [ + - ]: 55463 : if (ppindex)
# 3555 : 55463 : *ppindex = pindex;
# 3556 [ + + ]: 55463 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
# 3557 [ + - ]: 240 : LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
# 3558 : 240 : return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
# 3559 : 240 : }
# 3560 : 55223 : return true;
# 3561 : 55463 : }
# 3562 : :
# 3563 [ - + ]: 67086 : if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
# 3564 [ # # ]: 0 : LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
# 3565 : 0 : return false;
# 3566 : 0 : }
# 3567 : :
# 3568 : : // Get prev block index
# 3569 : 67086 : CBlockIndex* pindexPrev = nullptr;
# 3570 : 67086 : BlockMap::iterator mi{m_blockman.m_block_index.find(block.hashPrevBlock)};
# 3571 [ + + ]: 67086 : if (mi == m_blockman.m_block_index.end()) {
# 3572 [ + - ]: 3 : LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
# 3573 : 3 : return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
# 3574 : 3 : }
# 3575 : 67083 : pindexPrev = &((*mi).second);
# 3576 [ + + ]: 67083 : if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
# 3577 [ + - ]: 6 : LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
# 3578 : 6 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3579 : 6 : }
# 3580 [ + + ]: 67077 : if (!ContextualCheckBlockHeader(block, state, m_blockman, chainparams, pindexPrev, GetAdjustedTime())) {
# 3581 [ + - ]: 14 : LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
# 3582 : 14 : return false;
# 3583 : 14 : }
# 3584 : :
# 3585 : : /* Determine if this block descends from any block which has been found
# 3586 : : * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
# 3587 : : * them as failed. For example:
# 3588 : : *
# 3589 : : * D3
# 3590 : : * /
# 3591 : : * B2 - C2
# 3592 : : * / \
# 3593 : : * A D2 - E2 - F2
# 3594 : : * \
# 3595 : : * B1 - C1 - D1 - E1
# 3596 : : *
# 3597 : : * In the case that we attempted to reorg from E1 to F2, only to find
# 3598 : : * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
# 3599 : : * but NOT D3 (it was not in any of our candidate sets at the time).
# 3600 : : *
# 3601 : : * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
# 3602 : : * in LoadBlockIndex.
# 3603 : : */
# 3604 [ + + ]: 67063 : if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
# 3605 : : // The above does not mean "invalid": it checks if the previous block
# 3606 : : // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
# 3607 : : // optimization, in the common case of adding a new block to the tip,
# 3608 : : // we don't need to iterate over the failed blocks list.
# 3609 [ + + ]: 104190 : for (const CBlockIndex* failedit : m_failed_blocks) {
# 3610 [ + + ]: 104190 : if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
# 3611 : 1 : assert(failedit->nStatus & BLOCK_FAILED_VALID);
# 3612 : 0 : CBlockIndex* invalid_walk = pindexPrev;
# 3613 [ + + ]: 2 : while (invalid_walk != failedit) {
# 3614 : 1 : invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
# 3615 : 1 : m_blockman.m_dirty_blockindex.insert(invalid_walk);
# 3616 : 1 : invalid_walk = invalid_walk->pprev;
# 3617 : 1 : }
# 3618 [ + - ]: 1 : LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
# 3619 : 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
# 3620 : 1 : }
# 3621 : 104190 : }
# 3622 : 20353 : }
# 3623 : 67063 : }
# 3624 : 67074 : CBlockIndex* pindex{m_blockman.AddToBlockIndex(block)};
# 3625 : :
# 3626 [ + - ]: 67074 : if (ppindex)
# 3627 : 67074 : *ppindex = pindex;
# 3628 : :
# 3629 : 67074 : return true;
# 3630 : 122561 : }
# 3631 : :
# 3632 : : // Exposed wrapper for AcceptBlockHeader
# 3633 : : bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
# 3634 : 25330 : {
# 3635 : 25330 : AssertLockNotHeld(cs_main);
# 3636 : 25330 : {
# 3637 : 25330 : LOCK(cs_main);
# 3638 [ + + ]: 46177 : for (const CBlockHeader& header : headers) {
# 3639 : 46177 : CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
# 3640 : 46177 : bool accepted{AcceptBlockHeader(header, state, chainparams, &pindex)};
# 3641 : 46177 : ActiveChainstate().CheckBlockIndex();
# 3642 : :
# 3643 [ + + ]: 46177 : if (!accepted) {
# 3644 : 27 : return false;
# 3645 : 27 : }
# 3646 [ + + ]: 46150 : if (ppindex) {
# 3647 : 45248 : *ppindex = pindex;
# 3648 : 45248 : }
# 3649 : 46150 : }
# 3650 : 25330 : }
# 3651 [ + + ]: 25303 : if (NotifyHeaderTip(ActiveChainstate())) {
# 3652 [ + + ][ + + ]: 19114 : if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
# [ + - ]
# 3653 : 452 : const CBlockIndex& last_accepted{**ppindex};
# 3654 : 452 : const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / chainparams.GetConsensus().nPowTargetSpacing};
# 3655 : 452 : const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)};
# 3656 : 452 : LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress);
# 3657 : 452 : }
# 3658 : 19114 : }
# 3659 : 25303 : return true;
# 3660 : 25330 : }
# 3661 : :
# 3662 : : /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
# 3663 : : bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
# 3664 : 76384 : {
# 3665 : 76384 : const CBlock& block = *pblock;
# 3666 : :
# 3667 [ + + ]: 76384 : if (fNewBlock) *fNewBlock = false;
# 3668 : 76384 : AssertLockHeld(cs_main);
# 3669 : :
# 3670 : 76384 : CBlockIndex *pindexDummy = nullptr;
# 3671 [ + + ]: 76384 : CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
# 3672 : :
# 3673 : 76384 : bool accepted_header{m_chainman.AcceptBlockHeader(block, state, m_params, &pindex)};
# 3674 : 76384 : CheckBlockIndex();
# 3675 : :
# 3676 [ + + ]: 76384 : if (!accepted_header)
# 3677 : 237 : return false;
# 3678 : :
# 3679 : : // Try to process all requested blocks that we don't have, but only
# 3680 : : // process an unrequested block if it's new and has enough work to
# 3681 : : // advance our tip, and isn't too many blocks ahead.
# 3682 : 76147 : bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
# 3683 [ + + ]: 76147 : bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
# 3684 : : // Blocks that are too out-of-order needlessly limit the effectiveness of
# 3685 : : // pruning, because pruning will not delete block files that contain any
# 3686 : : // blocks which are too close in height to the tip. Apply this test
# 3687 : : // regardless of whether pruning is enabled; it should generally be safe to
# 3688 : : // not process unrequested blocks.
# 3689 : 76147 : bool fTooFarAhead{pindex->nHeight > m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
# 3690 : :
# 3691 : : // TODO: Decouple this function from the block download logic by removing fRequested
# 3692 : : // This requires some new chain data structure to efficiently look up if a
# 3693 : : // block is in a chain leading to a candidate for best tip, despite not
# 3694 : : // being such a candidate itself.
# 3695 : : // Note that this would break the getblockfrompeer RPC
# 3696 : :
# 3697 : : // TODO: deal better with return value and error conditions for duplicate
# 3698 : : // and unrequested blocks.
# 3699 [ + + ]: 76147 : if (fAlreadyHave) return true;
# 3700 [ + + ]: 65249 : if (!fRequested) { // If we didn't ask for it:
# 3701 [ - + ]: 1704 : if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
# 3702 [ + + ]: 1704 : if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
# 3703 [ + + ]: 612 : if (fTooFarAhead) return true; // Block height is too high
# 3704 : :
# 3705 : : // Protect against DoS attacks from low-work chains.
# 3706 : : // If our tip is behind, a peer could try to send us
# 3707 : : // low-work blocks on a fake chain that we would never
# 3708 : : // request; don't process these.
# 3709 [ + + ]: 611 : if (pindex->nChainWork < nMinimumChainWork) return true;
# 3710 : 611 : }
# 3711 : :
# 3712 [ - + ]: 64155 : if (!CheckBlock(block, state, m_params.GetConsensus()) ||
# 3713 [ + + ]: 64155 : !ContextualCheckBlock(block, state, m_params.GetConsensus(), pindex->pprev)) {
# 3714 [ + - ][ + + ]: 22 : if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
# 3715 : 8 : pindex->nStatus |= BLOCK_FAILED_VALID;
# 3716 : 8 : m_blockman.m_dirty_blockindex.insert(pindex);
# 3717 : 8 : }
# 3718 : 22 : return error("%s: %s", __func__, state.ToString());
# 3719 : 22 : }
# 3720 : :
# 3721 : : // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
# 3722 : : // (but if it does not build on our best tip, let the SendMessages loop relay it)
# 3723 [ + + ][ + + ]: 64133 : if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
# 3724 : 56292 : GetMainSignals().NewPoWValidBlock(pindex, pblock);
# 3725 : :
# 3726 : : // Write block to history file
# 3727 [ + + ]: 64133 : if (fNewBlock) *fNewBlock = true;
# 3728 : 64133 : try {
# 3729 : 64133 : FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, m_params, dbp)};
# 3730 [ - + ]: 64133 : if (blockPos.IsNull()) {
# 3731 : 0 : state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
# 3732 : 0 : return false;
# 3733 : 0 : }
# 3734 : 64133 : ReceivedBlockTransactions(block, pindex, blockPos);
# 3735 : 64133 : } catch (const std::runtime_error& e) {
# 3736 : 0 : return AbortNode(state, std::string("System error: ") + e.what());
# 3737 : 0 : }
# 3738 : :
# 3739 : 64133 : FlushStateToDisk(state, FlushStateMode::NONE);
# 3740 : :
# 3741 : 64133 : CheckBlockIndex();
# 3742 : :
# 3743 : 64133 : return true;
# 3744 : 64133 : }
# 3745 : :
# 3746 : : bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock>& block, bool force_processing, bool* new_block)
# 3747 : 75430 : {
# 3748 : 75430 : AssertLockNotHeld(cs_main);
# 3749 : :
# 3750 : 75430 : {
# 3751 : 75430 : CBlockIndex *pindex = nullptr;
# 3752 [ + + ]: 75430 : if (new_block) *new_block = false;
# 3753 : 75430 : BlockValidationState state;
# 3754 : :
# 3755 : : // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
# 3756 : : // Therefore, the following critical section must include the CheckBlock() call as well.
# 3757 : 75430 : LOCK(cs_main);
# 3758 : :
# 3759 : : // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if
# 3760 : : // CheckBlock() fails. This is protective against consensus failure if there are any unknown forms of block
# 3761 : : // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and
# 3762 : : // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html. Because CheckBlock() is
# 3763 : : // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial.
# 3764 : 75430 : bool ret = CheckBlock(*block, state, chainparams.GetConsensus());
# 3765 [ + + ]: 75430 : if (ret) {
# 3766 : : // Store to disk
# 3767 : 74941 : ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block);
# 3768 : 74941 : }
# 3769 [ + + ]: 75430 : if (!ret) {
# 3770 : 744 : GetMainSignals().BlockChecked(*block, state);
# 3771 : 744 : return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
# 3772 : 744 : }
# 3773 : 75430 : }
# 3774 : :
# 3775 : 74686 : NotifyHeaderTip(ActiveChainstate());
# 3776 : :
# 3777 : 74686 : BlockValidationState state; // Only used to report errors, not invalidity - ignore it
# 3778 [ + + ]: 74686 : if (!ActiveChainstate().ActivateBestChain(state, block)) {
# 3779 : 1 : return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
# 3780 : 1 : }
# 3781 : :
# 3782 : 74685 : return true;
# 3783 : 74686 : }
# 3784 : :
# 3785 : : MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef& tx, bool test_accept)
# 3786 : 33402 : {
# 3787 : 33402 : AssertLockHeld(cs_main);
# 3788 : 33402 : CChainState& active_chainstate = ActiveChainstate();
# 3789 [ - + ]: 33402 : if (!active_chainstate.GetMempool()) {
# 3790 : 0 : TxValidationState state;
# 3791 : 0 : state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
# 3792 : 0 : return MempoolAcceptResult::Failure(state);
# 3793 : 0 : }
# 3794 : 33402 : auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(), /*bypass_limits=*/ false, test_accept);
# 3795 : 33402 : active_chainstate.GetMempool()->check(active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
# 3796 : 33402 : return result;
# 3797 : 33402 : }
# 3798 : :
# 3799 : : bool TestBlockValidity(BlockValidationState& state,
# 3800 : : const CChainParams& chainparams,
# 3801 : : CChainState& chainstate,
# 3802 : : const CBlock& block,
# 3803 : : CBlockIndex* pindexPrev,
# 3804 : : bool fCheckPOW,
# 3805 : : bool fCheckMerkleRoot)
# 3806 : 28255 : {
# 3807 : 28255 : AssertLockHeld(cs_main);
# 3808 : 28255 : assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
# 3809 : 0 : CCoinsViewCache viewNew(&chainstate.CoinsTip());
# 3810 : 28255 : uint256 block_hash(block.GetHash());
# 3811 : 28255 : CBlockIndex indexDummy(block);
# 3812 : 28255 : indexDummy.pprev = pindexPrev;
# 3813 : 28255 : indexDummy.nHeight = pindexPrev->nHeight + 1;
# 3814 : 28255 : indexDummy.phashBlock = &block_hash;
# 3815 : :
# 3816 : : // NOTE: CheckBlockHeader is called by CheckBlock
# 3817 [ + + ]: 28255 : if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime()))
# 3818 : 3 : return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
# 3819 [ + + ]: 28252 : if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
# 3820 : 7 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
# 3821 [ + + ]: 28245 : if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
# 3822 : 1 : return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
# 3823 [ + + ]: 28244 : if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
# 3824 : 8 : return false;
# 3825 : 8 : }
# 3826 : 28236 : assert(state.IsValid());
# 3827 : :
# 3828 : 0 : return true;
# 3829 : 28244 : }
# 3830 : :
# 3831 : : /* This function is called from the RPC code for pruneblockchain */
# 3832 : : void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
# 3833 : 3 : {
# 3834 : 3 : BlockValidationState state;
# 3835 [ - + ]: 3 : if (!active_chainstate.FlushStateToDisk(
# 3836 : 3 : state, FlushStateMode::NONE, nManualPruneHeight)) {
# 3837 : 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
# 3838 : 0 : }
# 3839 : 3 : }
# 3840 : :
# 3841 : : void CChainState::LoadMempool(const ArgsManager& args)
# 3842 : 725 : {
# 3843 [ - + ]: 725 : if (!m_mempool) return;
# 3844 [ + + ]: 725 : if (args.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
# 3845 : 718 : ::LoadMempool(*m_mempool, *this);
# 3846 : 718 : }
# 3847 : 725 : m_mempool->SetIsLoaded(!ShutdownRequested());
# 3848 : 725 : }
# 3849 : :
# 3850 : : bool CChainState::LoadChainTip()
# 3851 : 467 : {
# 3852 : 467 : AssertLockHeld(cs_main);
# 3853 : 467 : const CCoinsViewCache& coins_cache = CoinsTip();
# 3854 : 467 : assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
# 3855 : 0 : const CBlockIndex* tip = m_chain.Tip();
# 3856 : :
# 3857 [ + + ][ + + ]: 467 : if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
# [ + - ]
# 3858 : 2 : return true;
# 3859 : 2 : }
# 3860 : :
# 3861 : : // Load pointer to end of best chain
# 3862 : 465 : CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
# 3863 [ - + ]: 465 : if (!pindex) {
# 3864 : 0 : return false;
# 3865 : 0 : }
# 3866 : 465 : m_chain.SetTip(pindex);
# 3867 : 465 : PruneBlockIndexCandidates();
# 3868 : :
# 3869 : 465 : tip = m_chain.Tip();
# 3870 : 465 : LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
# 3871 : 465 : tip->GetBlockHash().ToString(),
# 3872 : 465 : m_chain.Height(),
# 3873 : 465 : FormatISO8601DateTime(tip->GetBlockTime()),
# 3874 : 465 : GuessVerificationProgress(m_params.TxData(), tip));
# 3875 : 465 : return true;
# 3876 : 465 : }
# 3877 : :
# 3878 : : CVerifyDB::CVerifyDB()
# 3879 : 464 : {
# 3880 : 464 : uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
# 3881 : 464 : }
# 3882 : :
# 3883 : : CVerifyDB::~CVerifyDB()
# 3884 : 464 : {
# 3885 : 464 : uiInterface.ShowProgress("", 100, false);
# 3886 : 464 : }
# 3887 : :
# 3888 : : bool CVerifyDB::VerifyDB(
# 3889 : : CChainState& chainstate,
# 3890 : : const Consensus::Params& consensus_params,
# 3891 : : CCoinsView& coinsview,
# 3892 : : int nCheckLevel, int nCheckDepth)
# 3893 : 464 : {
# 3894 : 464 : AssertLockHeld(cs_main);
# 3895 : :
# 3896 [ - + ][ + + ]: 464 : if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) {
# 3897 : 68 : return true;
# 3898 : 68 : }
# 3899 : :
# 3900 : : // Verify blocks in the best chain
# 3901 [ + + ][ + + ]: 396 : if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
# 3902 : 21 : nCheckDepth = chainstate.m_chain.Height();
# 3903 : 21 : }
# 3904 : 396 : nCheckLevel = std::max(0, std::min(4, nCheckLevel));
# 3905 : 396 : LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
# 3906 : 396 : CCoinsViewCache coins(&coinsview);
# 3907 : 396 : CBlockIndex* pindex;
# 3908 : 396 : CBlockIndex* pindexFailure = nullptr;
# 3909 : 396 : int nGoodTransactions = 0;
# 3910 : 396 : BlockValidationState state;
# 3911 : 396 : int reportDone = 0;
# 3912 : 396 : LogPrintf("[0%%]..."); /* Continued */
# 3913 : :
# 3914 : 396 : const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
# 3915 : :
# 3916 [ + - ][ + + ]: 3142 : for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
# 3917 [ + + ]: 3121 : const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
# 3918 [ + + ]: 3121 : if (reportDone < percentageDone / 10) {
# 3919 : : // report every 10% step
# 3920 : 2253 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 3921 : 2253 : reportDone = percentageDone / 10;
# 3922 : 2253 : }
# 3923 : 3121 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 3924 [ + + ]: 3121 : if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
# 3925 : 372 : break;
# 3926 : 372 : }
# 3927 [ + + ][ + - ]: 2749 : if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
# [ - + ]
# 3928 : : // If pruning or running under an assumeutxo snapshot, only go
# 3929 : : // back as far as we have data.
# 3930 : 0 : LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
# 3931 : 0 : break;
# 3932 : 0 : }
# 3933 : 2749 : CBlock block;
# 3934 : : // check level 0: read from disk
# 3935 [ - + ]: 2749 : if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
# 3936 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 3937 : 0 : }
# 3938 : : // check level 1: verify block validity
# 3939 [ + - ][ - + ]: 2749 : if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) {
# 3940 : 0 : return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
# 3941 : 0 : pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 3942 : 0 : }
# 3943 : : // check level 2: verify undo validity
# 3944 [ + - ][ + - ]: 2749 : if (nCheckLevel >= 2 && pindex) {
# 3945 : 2749 : CBlockUndo undo;
# 3946 [ + - ]: 2749 : if (!pindex->GetUndoPos().IsNull()) {
# 3947 [ + + ]: 2749 : if (!UndoReadFromDisk(undo, pindex)) {
# 3948 : 1 : return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
# 3949 : 1 : }
# 3950 : 2749 : }
# 3951 : 2749 : }
# 3952 : : // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
# 3953 : 2748 : size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
# 3954 : :
# 3955 [ + - ][ + - ]: 2748 : if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
# 3956 : 2748 : assert(coins.GetBestBlock() == pindex->GetBlockHash());
# 3957 : 0 : DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
# 3958 [ - + ]: 2748 : if (res == DISCONNECT_FAILED) {
# 3959 : 0 : return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 3960 : 0 : }
# 3961 [ - + ]: 2748 : if (res == DISCONNECT_UNCLEAN) {
# 3962 : 0 : nGoodTransactions = 0;
# 3963 : 0 : pindexFailure = pindex;
# 3964 : 2748 : } else {
# 3965 : 2748 : nGoodTransactions += block.vtx.size();
# 3966 : 2748 : }
# 3967 : 2748 : }
# 3968 [ + + ]: 2748 : if (ShutdownRequested()) return true;
# 3969 : 2748 : }
# 3970 [ - + ]: 393 : if (pindexFailure) {
# 3971 : 0 : return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
# 3972 : 0 : }
# 3973 : :
# 3974 : : // store block count as we move pindex at check level >= 4
# 3975 : 393 : int block_count = chainstate.m_chain.Height() - pindex->nHeight;
# 3976 : :
# 3977 : : // check level 4: try reconnecting blocks
# 3978 [ + + ]: 393 : if (nCheckLevel >= 4) {
# 3979 [ + + ]: 288 : while (pindex != chainstate.m_chain.Tip()) {
# 3980 : 287 : const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
# 3981 [ + + ]: 287 : if (reportDone < percentageDone / 10) {
# 3982 : : // report every 10% step
# 3983 : 5 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
# 3984 : 5 : reportDone = percentageDone / 10;
# 3985 : 5 : }
# 3986 : 287 : uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
# 3987 : 287 : pindex = chainstate.m_chain.Next(pindex);
# 3988 : 287 : CBlock block;
# 3989 [ - + ]: 287 : if (!ReadBlockFromDisk(block, pindex, consensus_params))
# 3990 : 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 3991 [ - + ]: 287 : if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
# 3992 : 0 : return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
# 3993 : 0 : }
# 3994 [ - + ]: 287 : if (ShutdownRequested()) return true;
# 3995 : 287 : }
# 3996 : 1 : }
# 3997 : :
# 3998 : 393 : LogPrintf("[DONE].\n");
# 3999 : 393 : LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
# 4000 : :
# 4001 : 393 : return true;
# 4002 : 393 : }
# 4003 : :
# 4004 : : /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
# 4005 : : bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs)
# 4006 : 0 : {
# 4007 : 0 : AssertLockHeld(cs_main);
# 4008 : : // TODO: merge with ConnectBlock
# 4009 : 0 : CBlock block;
# 4010 [ # # ]: 0 : if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
# 4011 : 0 : return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
# 4012 : 0 : }
# 4013 : :
# 4014 [ # # ]: 0 : for (const CTransactionRef& tx : block.vtx) {
# 4015 [ # # ]: 0 : if (!tx->IsCoinBase()) {
# 4016 [ # # ]: 0 : for (const CTxIn &txin : tx->vin) {
# 4017 : 0 : inputs.SpendCoin(txin.prevout);
# 4018 : 0 : }
# 4019 : 0 : }
# 4020 : : // Pass check = true as every addition may be an overwrite.
# 4021 : 0 : AddCoins(inputs, *tx, pindex->nHeight, true);
# 4022 : 0 : }
# 4023 : 0 : return true;
# 4024 : 0 : }
# 4025 : :
/**
 * Bring the coins database back to a consistent state after an interrupted
 * flush. CCoinsViewDB records two "head" blocks while a flush is in progress;
 * if both are present on startup, the flush was cut short and the changes of
 * the blocks between the old and new head must be re-applied (and any blocks
 * on the abandoned branch undone).
 *
 * @return true if the database is (or has been made) consistent; false/error
 *         on unrecoverable inconsistency or unknown head blocks.
 */
bool CChainState::ReplayBlocks()
{
    LOCK(cs_main);

    CCoinsView& db = this->CoinsDB();
    CCoinsViewCache cache(&db);

    // GetHeadBlocks() is non-empty only while a flush is mid-way: it then
    // holds exactly {new tip, old tip}. Any other size is unexpected.
    std::vector<uint256> hashHeads = db.GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");

    uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
    LogPrintf("Replaying blocks\n");

    const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.

    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);

    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }

    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }

    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
        LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
        uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
        // RollforwardBlock is idempotent (spends/adds are overwrites), so
        // re-applying a block that was partially flushed is safe.
        if (!RollforwardBlock(pindex, cache)) return false;
    }

    // Record the new best block and persist, which clears the head-blocks
    // marker and restores full consistency.
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
# 4092 : :
# 4093 : : bool CChainState::NeedsRedownload() const
# 4094 : 924 : {
# 4095 : 924 : AssertLockHeld(cs_main);
# 4096 : :
# 4097 : : // At and above m_params.SegwitHeight, segwit consensus rules must be validated
# 4098 : 924 : CBlockIndex* block{m_chain.Tip()};
# 4099 : :
# 4100 [ + + ][ + + ]: 78058 : while (block != nullptr && DeploymentActiveAt(*block, m_params.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
# 4101 [ + + ]: 77135 : if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
# 4102 : : // block is insufficiently validated for a segwit client
# 4103 : 1 : return true;
# 4104 : 1 : }
# 4105 : 77134 : block = block->pprev;
# 4106 : 77134 : }
# 4107 : :
# 4108 : 923 : return false;
# 4109 : 924 : }
# 4110 : :
# 4111 : : void CChainState::UnloadBlockIndex()
# 4112 : 2099 : {
# 4113 : 2099 : AssertLockHeld(::cs_main);
# 4114 : 2099 : nBlockSequenceId = 1;
# 4115 : 2099 : setBlockIndexCandidates.clear();
# 4116 : 2099 : }
# 4117 : :
# 4118 : : // May NOT be used after any connections are up as much
# 4119 : : // of the peer-processing logic assumes a consistent
# 4120 : : // block index state
# 4121 : : void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
# 4122 : 2106 : {
# 4123 : 2106 : AssertLockHeld(::cs_main);
# 4124 : 2106 : chainman.Unload();
# 4125 : 2106 : pindexBestHeader = nullptr;
# 4126 [ + + ]: 2106 : if (mempool) mempool->clear();
# 4127 : 2106 : g_versionbitscache.Clear();
# 4128 [ + + ]: 63180 : for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
# 4129 : 61074 : warningcache[b].clear();
# 4130 : 61074 : }
# 4131 : 2106 : fHavePruned = false;
# 4132 : 2106 : }
# 4133 : :
/**
 * Load the block index from disk (unless reindexing) and populate each
 * chainstate's set of tip candidates, taking assumed-valid (snapshot) index
 * entries into account.
 *
 * @return false on database load failure or shutdown request; true otherwise.
 */
bool ChainstateManager::LoadBlockIndex()
{
    AssertLockHeld(cs_main);
    // Load block index from databases
    bool needs_init = fReindex;
    if (!fReindex) {
        bool ret = m_blockman.LoadBlockIndexDB();
        if (!ret) return false;

        // Process entries parents-before-children by sorting on height.
        std::vector<CBlockIndex*> vSortedByHeight{m_blockman.GetAllBlockIndices()};
        std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
                  CBlockIndexHeightOnlyComparator());

        // Find start of assumed-valid region.
        int first_assumed_valid_height = std::numeric_limits<int>::max();

        for (const CBlockIndex* block : vSortedByHeight) {
            if (block->IsAssumedValid()) {
                auto chainstates = GetAll();

                // If we encounter an assumed-valid block index entry, ensure that we have
                // one chainstate that tolerates assumed-valid entries and another that does
                // not (i.e. the background validation chainstate), since assumed-valid
                // entries should always be pending validation by a fully-validated chainstate.
                auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
                assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
                assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));

                first_assumed_valid_height = block->nHeight;
                break;
            }
        }

        for (CBlockIndex* pindex : vSortedByHeight) {
            if (ShutdownRequested()) return false;
            // A block is a tip candidate if it is assumed-valid, or if it has
            // fully validated transactions and all of its ancestors' data.
            if (pindex->IsAssumedValid() ||
                    (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                     (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {

                // Fill each chainstate's block candidate set. Only add assumed-valid
                // blocks to the tip candidate set if the chainstate is allowed to rely on
                // assumed-valid blocks.
                //
                // If all setBlockIndexCandidates contained the assumed-valid blocks, the
                // background chainstate's ActivateBestChain() call would add assumed-valid
                // blocks to the chain (based on how FindMostWorkChain() works). Obviously
                // we don't want this since the purpose of the background validation chain
                // is to validate assumed-valid blocks.
                //
                // Note: This is considering all blocks whose height is greater or equal to
                // the first assumed-valid block to be assumed-valid blocks, and excluding
                // them from the background chainstate's setBlockIndexCandidates set. This
                // does mean that some blocks which are not technically assumed-valid
                // (later blocks on a fork beginning before the first assumed-valid block)
                // might not get added to the background chainstate, but this is ok,
                // because they will still be attached to the active chainstate if they
                // actually contain more work.
                //
                // Instead of this height-based approach, an earlier attempt was made at
                // detecting "holistically" whether the block index under consideration
                // relied on an assumed-valid ancestor, but this proved to be too slow to
                // be practical.
                for (CChainState* chainstate : GetAll()) {
                    if (chainstate->reliesOnAssumedValid() ||
                            pindex->nHeight < first_assumed_valid_height) {
                        chainstate->setBlockIndexCandidates.insert(pindex);
                    }
                }
            }
            // Track the most-work invalid block seen, for logging/warnings.
            if (pindex->nStatus & BLOCK_FAILED_MASK && (!m_best_invalid || pindex->nChainWork > m_best_invalid->nChainWork)) {
                m_best_invalid = pindex;
            }
        }

        needs_init = m_blockman.m_block_index.empty();
    }

    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.

        LogPrintf("Initializing databases...\n");
    }
    return true;
}
# 4222 : :
# 4223 : : bool CChainState::LoadGenesisBlock()
# 4224 : 939 : {
# 4225 : 939 : LOCK(cs_main);
# 4226 : :
# 4227 : : // Check whether we're already initialized by checking for genesis in
# 4228 : : // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
# 4229 : : // set based on the coins db, not the block index db, which is the only
# 4230 : : // thing loaded at this point.
# 4231 [ + + ]: 939 : if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash()))
# 4232 : 478 : return true;
# 4233 : :
# 4234 : 461 : try {
# 4235 : 461 : const CBlock& block = m_params.GenesisBlock();
# 4236 : 461 : FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, m_params, nullptr)};
# 4237 [ - + ]: 461 : if (blockPos.IsNull()) {
# 4238 : 0 : return error("%s: writing genesis block to disk failed", __func__);
# 4239 : 0 : }
# 4240 : 461 : CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
# 4241 : 461 : ReceivedBlockTransactions(block, pindex, blockPos);
# 4242 : 461 : } catch (const std::runtime_error& e) {
# 4243 : 0 : return error("%s: failed to write genesis block: %s", __func__, e.what());
# 4244 : 0 : }
# 4245 : :
# 4246 : 461 : return true;
# 4247 : 461 : }
# 4248 : :
/**
 * Import blocks from an external file (used by -reindex and -loadblock).
 * Scans the file for network-magic-delimited block records, accepts each
 * block, and retries out-of-order blocks once their parent arrives.
 *
 * @param fileIn  open file to import; ownership is transferred (closed by the
 *                CBufferedFile destructor).
 * @param dbp     if non-null, the on-disk position of the file, updated per
 *                block; required for out-of-order block handling.
 */
void CChainState::LoadExternalBlockFile(FILE* fileIn, FlatFilePos* dbp)
{
    AssertLockNotHeld(m_chainstate_mutex);
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    // NOTE: static, so state persists across calls for multi-file reindexes.
    static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            if (ShutdownRequested()) return;

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(m_params.MessageStart()[0]);
                nRewind = blkdat.GetPos() + 1;
                blkdat >> buf;
                if (memcmp(buf, m_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
                    continue;
                }
                // read size
                // 80 bytes is the serialized block-header size: anything
                // smaller cannot be a valid block record.
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                block.hashPrevBlock.ToString());
                        if (dbp)
                            mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                        continue;
                    }

                    // process in case the block isn't known yet
                    const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                        BlockValidationState state;
                        if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) {
                            nLoaded++;
                        }
                        if (state.IsError()) {
                            // Internal error (not mere block invalidity): stop importing.
                            break;
                        }
                    } else if (hash != m_params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                        LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == m_params.GetConsensus().hashGenesisBlock) {
                    BlockValidationState state;
                    if (!ActivateBestChain(state, nullptr)) {
                        break;
                    }
                }

                NotifyHeaderTip(*this);

                // Recursively process earlier encountered successors of this block
                // (breadth-first via an explicit queue of parent hashes).
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, FlatFilePos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, m_params.GetConsensus())) {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            BlockValidationState dummy;
                            if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) {
                                nLoaded++;
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        // Advance before erasing so the iterator stays valid.
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip(*this);
                    }
                }
            } catch (const std::exception& e) {
                // Per-block failure: log and continue scanning the file.
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
}
# 4367 : :
# 4368 : : void CChainState::CheckBlockIndex()
# 4369 : 243421 : {
# 4370 [ + + ]: 243421 : if (!fCheckBlockIndex) {
# 4371 : 604 : return;
# 4372 : 604 : }
# 4373 : :
# 4374 : 242817 : LOCK(cs_main);
# 4375 : :
# 4376 : : // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
# 4377 : : // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
# 4378 : : // tests when iterating the block tree require that m_chain has been initialized.)
# 4379 [ + + ]: 242817 : if (m_chain.Height() < 0) {
# 4380 : 18 : assert(m_blockman.m_block_index.size() <= 1);
# 4381 : 0 : return;
# 4382 : 18 : }
# 4383 : :
# 4384 : : // Build forward-pointing map of the entire block tree.
# 4385 : 242799 : std::multimap<CBlockIndex*,CBlockIndex*> forward;
# 4386 [ + + ]: 120288730 : for (auto& [_, block_index] : m_blockman.m_block_index) {
# 4387 : 120288730 : forward.emplace(block_index.pprev, &block_index);
# 4388 : 120288730 : }
# 4389 : :
# 4390 : 242799 : assert(forward.size() == m_blockman.m_block_index.size());
# 4391 : :
# 4392 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
# 4393 : 242799 : CBlockIndex *pindex = rangeGenesis.first->second;
# 4394 : 242799 : rangeGenesis.first++;
# 4395 : 242799 : assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
# 4396 : :
# 4397 : : // Iterate over the entire block tree, using depth-first search.
# 4398 : : // Along the way, remember whether there are blocks on the path from genesis
# 4399 : : // block being explored which are the first to have certain properties.
# 4400 : 0 : size_t nNodes = 0;
# 4401 : 242799 : int nHeight = 0;
# 4402 : 242799 : CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
# 4403 : 242799 : CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
# 4404 : 242799 : CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
# 4405 : 242799 : CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
# 4406 : 242799 : CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
# 4407 : 242799 : CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
# 4408 : 242799 : CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
# 4409 [ + + ]: 120531529 : while (pindex != nullptr) {
# 4410 : 120288730 : nNodes++;
# 4411 [ + + ][ + + ]: 120288730 : if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
# 4412 : : // Assumed-valid index entries will not have data since we haven't downloaded the
# 4413 : : // full block yet.
# 4414 [ + + ][ + + ]: 120288730 : if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA) && !pindex->IsAssumedValid()) {
# [ + - ]
# 4415 : 432939 : pindexFirstMissing = pindex;
# 4416 : 432939 : }
# 4417 [ + + ][ + + ]: 120288730 : if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
# 4418 [ + + ][ + - ]: 120288730 : if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
# [ - + ]
# 4419 : :
# 4420 [ + + ][ + - ]: 120288730 : if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
# 4421 : : // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these
# 4422 : : // *_VALID_MASK flags will not be present for index entries we are temporarily assuming
# 4423 : : // valid.
# 4424 [ + + ]: 120045931 : if (pindexFirstNotTransactionsValid == nullptr &&
# 4425 [ + + ]: 120045931 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) {
# 4426 : 429339 : pindexFirstNotTransactionsValid = pindex;
# 4427 : 429339 : }
# 4428 : :
# 4429 [ + + ]: 120045931 : if (pindexFirstNotChainValid == nullptr &&
# 4430 [ + + ]: 120045931 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) {
# 4431 : 18530247 : pindexFirstNotChainValid = pindex;
# 4432 : 18530247 : }
# 4433 : :
# 4434 [ + + ]: 120045931 : if (pindexFirstNotScriptsValid == nullptr &&
# 4435 [ + + ]: 120045931 : (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) {
# 4436 : 18530247 : pindexFirstNotScriptsValid = pindex;
# 4437 : 18530247 : }
# 4438 : 120045931 : }
# 4439 : :
# 4440 : : // Begin: actual consistency checks.
# 4441 [ + + ]: 120288730 : if (pindex->pprev == nullptr) {
# 4442 : : // Genesis block checks.
# 4443 : 242799 : assert(pindex->GetBlockHash() == m_params.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
# 4444 : 0 : assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
# 4445 : 242799 : }
# 4446 [ + + ]: 120288730 : if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
# 4447 : : // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
# 4448 : : // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
# 4449 : : // Unless these indexes are assumed valid and pending block download on a
# 4450 : : // background chainstate.
# 4451 [ + + ][ + - ]: 120288730 : if (!fHavePruned && !pindex->IsAssumedValid()) {
# 4452 : : // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
# 4453 : 111640630 : assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
# 4454 : 0 : assert(pindexFirstMissing == pindexFirstNeverProcessed);
# 4455 : 111640630 : } else {
# 4456 : : // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
# 4457 [ + + ]: 8648100 : if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
# 4458 : 8648100 : }
# 4459 [ + + ]: 120288730 : if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
# 4460 [ - + ]: 120288730 : if (pindex->IsAssumedValid()) {
# 4461 : : // Assumed-valid blocks should have some nTx value.
# 4462 : 0 : assert(pindex->nTx > 0);
# 4463 : : // Assumed-valid blocks should connect to the main chain.
# 4464 : 0 : assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE);
# 4465 : 120288730 : } else {
# 4466 : : // Otherwise there should only be an nTx value if we have
# 4467 : : // actually seen a block's transactions.
# 4468 : 120288730 : assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
# 4469 : 120288730 : }
# 4470 : : // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
# 4471 : 0 : assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
# 4472 : 0 : assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
# 4473 : 0 : assert(pindex->nHeight == nHeight); // nHeight must be consistent.
# 4474 : 0 : assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
# 4475 : 0 : assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
# 4476 : 0 : assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
# 4477 [ + - ]: 120288730 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
# 4478 [ + + ]: 120288730 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
# 4479 [ + + ]: 120288730 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
# 4480 [ + + ]: 120288730 : if (pindexFirstInvalid == nullptr) {
# 4481 : : // Checks for not-invalid blocks.
# 4482 : 102129001 : assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
# 4483 : 102129001 : }
# 4484 [ + + ][ + + ]: 120288730 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
# [ + + ]
# 4485 [ + + ]: 703807 : if (pindexFirstInvalid == nullptr) {
# 4486 : 600282 : const bool is_active = this == &m_chainman.ActiveChainstate();
# 4487 : :
# 4488 : : // If this block sorts at least as good as the current tip and
# 4489 : : // is valid and we have all data for its parents, it must be in
# 4490 : : // setBlockIndexCandidates. m_chain.Tip() must also be there
# 4491 : : // even if some data has been pruned.
# 4492 : : //
# 4493 : : // Don't perform this check for the background chainstate since
# 4494 : : // its setBlockIndexCandidates shouldn't have some entries (i.e. those past the
# 4495 : : // snapshot block) which do exist in the block index for the active chainstate.
# 4496 [ + + ][ + + ]: 600282 : if (is_active && (pindexFirstMissing == nullptr || pindex == m_chain.Tip())) {
# [ + + ]
# 4497 : 598475 : assert(setBlockIndexCandidates.count(pindex));
# 4498 : 598475 : }
# 4499 : : // If some parent is missing, then it could be that this block was in
# 4500 : : // setBlockIndexCandidates but had to be removed because of the missing data.
# 4501 : : // In this case it must be in m_blocks_unlinked -- see test below.
# 4502 : 600282 : }
# 4503 : 119584923 : } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
# 4504 : 119584923 : assert(setBlockIndexCandidates.count(pindex) == 0);
# 4505 : 119584923 : }
# 4506 : : // Check whether this block is in m_blocks_unlinked.
# 4507 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
# 4508 : 120288730 : bool foundInUnlinked = false;
# 4509 [ + + ]: 120290352 : while (rangeUnlinked.first != rangeUnlinked.second) {
# 4510 : 137446 : assert(rangeUnlinked.first->first == pindex->pprev);
# 4511 [ + + ]: 137446 : if (rangeUnlinked.first->second == pindex) {
# 4512 : 135824 : foundInUnlinked = true;
# 4513 : 135824 : break;
# 4514 : 135824 : }
# 4515 : 1622 : rangeUnlinked.first++;
# 4516 : 1622 : }
# 4517 [ + + ][ + + ]: 120288730 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
# [ + + ][ + - ]
# 4518 : : // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
# 4519 : 135824 : assert(foundInUnlinked);
# 4520 : 135824 : }
# 4521 [ + + ]: 120288730 : if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
# 4522 [ + + ]: 120288730 : if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
# 4523 [ + + ][ + + ]: 120288730 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
# [ + + ][ + + ]
# 4524 : : // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
# 4525 : 5795700 : assert(fHavePruned); // We must have pruned.
# 4526 : : // This block may have entered m_blocks_unlinked if:
# 4527 : : // - it has a descendant that at some point had more work than the
# 4528 : : // tip, and
# 4529 : : // - we tried switching to that descendant but were missing
# 4530 : : // data for some intermediate block between m_chain and the
# 4531 : : // tip.
# 4532 : : // So if this block is itself better than m_chain.Tip() and it wasn't in
# 4533 : : // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
# 4534 [ + + ][ - + ]: 5795700 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
# [ - + ]
# 4535 [ # # ]: 0 : if (pindexFirstInvalid == nullptr) {
# 4536 : 0 : assert(foundInUnlinked);
# 4537 : 0 : }
# 4538 : 0 : }
# 4539 : 5795700 : }
# 4540 : : // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
# 4541 : : // End: actual consistency checks.
# 4542 : :
# 4543 : : // Try descending into the first subnode.
# 4544 : 0 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
# 4545 [ + + ]: 120288730 : if (range.first != range.second) {
# 4546 : : // A subnode was found.
# 4547 : 101595363 : pindex = range.first->second;
# 4548 : 101595363 : nHeight++;
# 4549 : 101595363 : continue;
# 4550 : 101595363 : }
# 4551 : : // This is a leaf node.
# 4552 : : // Move upwards until we reach a node of which we have not yet visited the last child.
# 4553 [ + + ]: 120531529 : while (pindex) {
# 4554 : : // We are going to either move to a parent or a sibling of pindex.
# 4555 : : // If pindex was the first with a certain property, unset the corresponding variable.
# 4556 [ + + ]: 120288730 : if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
# 4557 [ + + ]: 120288730 : if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
# 4558 [ + + ]: 120288730 : if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
# 4559 [ - + ]: 120288730 : if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
# 4560 [ + + ]: 120288730 : if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
# 4561 [ + + ]: 120288730 : if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
# 4562 [ + + ]: 120288730 : if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
# 4563 : : // Find our parent.
# 4564 : 120288730 : CBlockIndex* pindexPar = pindex->pprev;
# 4565 : : // Find which child we just visited.
# 4566 : 120288730 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
# 4567 [ + + ]: 146476618 : while (rangePar.first->second != pindex) {
# 4568 : 26187888 : assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
# 4569 : 0 : rangePar.first++;
# 4570 : 26187888 : }
# 4571 : : // Proceed to the next one.
# 4572 : 120288730 : rangePar.first++;
# 4573 [ + + ]: 120288730 : if (rangePar.first != rangePar.second) {
# 4574 : : // Move to the sibling.
# 4575 : 18450568 : pindex = rangePar.first->second;
# 4576 : 18450568 : break;
# 4577 : 101838162 : } else {
# 4578 : : // Move up further.
# 4579 : 101838162 : pindex = pindexPar;
# 4580 : 101838162 : nHeight--;
# 4581 : 101838162 : continue;
# 4582 : 101838162 : }
# 4583 : 120288730 : }
# 4584 : 18693367 : }
# 4585 : :
# 4586 : : // Check that we actually traversed the entire map.
# 4587 : 242799 : assert(nNodes == forward.size());
# 4588 : 242799 : }
# 4589 : :
# 4590 : : std::string CChainState::ToString()
# 4591 : 996 : {
# 4592 : 996 : AssertLockHeld(::cs_main);
# 4593 : 996 : CBlockIndex* tip = m_chain.Tip();
# 4594 : 996 : return strprintf("Chainstate [%s] @ height %d (%s)",
# 4595 [ + + ]: 996 : m_from_snapshot_blockhash ? "snapshot" : "ibd",
# 4596 [ + + ][ + + ]: 996 : tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
# 4597 : 996 : }
# 4598 : :
# 4599 : : bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
# 4600 : 23 : {
# 4601 : 23 : AssertLockHeld(::cs_main);
# 4602 [ - + ]: 23 : if (coinstip_size == m_coinstip_cache_size_bytes &&
# 4603 [ # # ]: 23 : coinsdb_size == m_coinsdb_cache_size_bytes) {
# 4604 : : // Cache sizes are unchanged, no need to continue.
# 4605 : 0 : return true;
# 4606 : 0 : }
# 4607 : 23 : size_t old_coinstip_size = m_coinstip_cache_size_bytes;
# 4608 : 23 : m_coinstip_cache_size_bytes = coinstip_size;
# 4609 : 23 : m_coinsdb_cache_size_bytes = coinsdb_size;
# 4610 : 23 : CoinsDB().ResizeCache(coinsdb_size);
# 4611 : :
# 4612 : 23 : LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
# 4613 : 23 : this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
# 4614 : 23 : LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
# 4615 : 23 : this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
# 4616 : :
# 4617 : 23 : BlockValidationState state;
# 4618 : 23 : bool ret;
# 4619 : :
# 4620 [ + + ]: 23 : if (coinstip_size > old_coinstip_size) {
# 4621 : : // Likely no need to flush if cache sizes have grown.
# 4622 : 9 : ret = FlushStateToDisk(state, FlushStateMode::IF_NEEDED);
# 4623 : 14 : } else {
# 4624 : : // Otherwise, flush state to disk and deallocate the in-memory coins map.
# 4625 : 14 : ret = FlushStateToDisk(state, FlushStateMode::ALWAYS);
# 4626 : 14 : CoinsTip().ReallocateCache();
# 4627 : 14 : }
# 4628 : 23 : return ret;
# 4629 : 23 : }
# 4630 : :
//! Serialization version written to, and expected from, mempool.dat.
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
# 4632 : :
//! Load mempool contents previously persisted by DumpMempool() from
//! <datadir>/mempool.dat and resubmit each transaction via AcceptToMemoryPool.
//! Returns false if the file is missing/unreadable, has an unexpected
//! version, fails to deserialize, or a shutdown is requested mid-load.
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
{
    // Transactions whose recorded acceptance time is older than
    // -mempoolexpiry (hours, converted to seconds here) are dropped instead
    // of being resubmitted.
    int64_t nExpiryTimeout = gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat", "rb")};
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    // Counters for the summary log line at the end.
    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t unbroadcast = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num) {
            --num;
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            // Re-apply any fee delta that had been set for this transaction
            // (e.g. via prioritisetransaction) before the dump.
            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            if (nTime > nNow - nExpiryTimeout) {
                LOCK(cs_main);
                const auto& accepted = AcceptToMemoryPool(active_chainstate, tx, nTime, /*bypass_limits=*/false, /*test_accept=*/false);
                if (accepted.m_result_type == MempoolAcceptResult::ResultType::VALID) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (pool.exists(GenTxid::Txid(tx->GetHash()))) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            if (ShutdownRequested())
                return false;
        }
        // Fee deltas for transactions that were NOT in the mempool at dump
        // time (in-mempool deltas were re-applied inline above).
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            pool.PrioritiseTransaction(i.first, i.second);
        }

        std::set<uint256> unbroadcast_txids;
        file >> unbroadcast_txids;
        unbroadcast = unbroadcast_txids.size();
        for (const auto& txid : unbroadcast_txids) {
            // Ensure transactions were accepted to mempool then add to
            // unbroadcast set.
            if (pool.get(txid) != nullptr) pool.AddUnbroadcastTx(txid);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
    return true;
}
# 4716 : :
//! Persist the current mempool contents (transactions with their times and
//! fee deltas, orphaned fee deltas, and unbroadcast txids) to
//! <datadir>/mempool.dat. The file is written to "mempool.dat.new" first and
//! renamed into place, so a crash mid-dump never corrupts an existing dump.
//! Returns false on any failure.
bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function, bool skip_file_commit)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;
    std::set<uint256> unbroadcast_txids;

    // Serialize concurrent dump requests so their file writes can't interleave.
    static Mutex dump_mutex;
    LOCK(dump_mutex);

    {
        // Snapshot mempool state under pool.cs, then release it before doing
        // any (slow) file I/O.
        LOCK(pool.cs);
        for (const auto &i : pool.mapDeltas) {
            mapDeltas[i.first] = i.second;
        }
        vinfo = pool.infoAll();
        unbroadcast_txids = pool.GetUnbroadcastTxs();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat.new", "wb")};
        if (!filestr) {
            return false;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << int64_t{count_seconds(i.m_time)};
            file << int64_t{i.nFeeDelta};
            // Deltas for in-mempool transactions are stored inline above, so
            // only deltas for absent transactions remain in mapDeltas.
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;

        LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
        file << unbroadcast_txids;

        if (!skip_file_commit && !FileCommit(file.Get()))
            throw std::runtime_error("FileCommit failed");
        file.fclose();
        // Atomically replace any previous dump with the freshly written one.
        if (!RenameOver(gArgs.GetDataDirNet() / "mempool.dat.new", gArgs.GetDataDirNet() / "mempool.dat")) {
            throw std::runtime_error("Rename failed");
        }
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
        return false;
    }
    return true;
}
# 4777 : :
# 4778 : : //! Guess how far we are in the verification process at the given block index
# 4779 : : //! require cs_main if pindex has not been validated yet (because nChainTx might be unset)
# 4780 : 166252 : double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
# 4781 [ + + ]: 166252 : if (pindex == nullptr)
# 4782 : 1 : return 0.0;
# 4783 : :
# 4784 : 166251 : int64_t nNow = time(nullptr);
# 4785 : :
# 4786 : 166251 : double fTxTotal;
# 4787 : :
# 4788 [ + + ]: 166251 : if (pindex->nChainTx <= data.nTxCount) {
# 4789 : 346 : fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
# 4790 : 165905 : } else {
# 4791 : 165905 : fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
# 4792 : 165905 : }
# 4793 : :
# 4794 : 166251 : return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
# 4795 : 166252 : }
# 4796 : :
# 4797 : : std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
# 4798 : 15 : {
# 4799 : 15 : LOCK(::cs_main);
# 4800 [ + + ][ + + ]: 15 : if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
# 4801 : : // If a snapshot chainstate exists, it will always be our active.
# 4802 : 4 : return m_active_chainstate->m_from_snapshot_blockhash;
# 4803 : 4 : }
# 4804 : 11 : return std::nullopt;
# 4805 : 15 : }
# 4806 : :
# 4807 : : std::vector<CChainState*> ChainstateManager::GetAll()
# 4808 : 85107 : {
# 4809 : 85107 : LOCK(::cs_main);
# 4810 : 85107 : std::vector<CChainState*> out;
# 4811 : :
# 4812 [ + - ][ + + ]: 85107 : if (!IsSnapshotValidated() && m_ibd_chainstate) {
# 4813 : 85051 : out.push_back(m_ibd_chainstate.get());
# 4814 : 85051 : }
# 4815 : :
# 4816 [ + + ]: 85107 : if (m_snapshot_chainstate) {
# 4817 : 118 : out.push_back(m_snapshot_chainstate.get());
# 4818 : 118 : }
# 4819 : :
# 4820 : 85107 : return out;
# 4821 : 85107 : }
# 4822 : :
# 4823 : : CChainState& ChainstateManager::InitializeChainstate(
# 4824 : : CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
# 4825 : 946 : {
# 4826 : 946 : AssertLockHeld(::cs_main);
# 4827 : 946 : bool is_snapshot = snapshot_blockhash.has_value();
# 4828 : 946 : std::unique_ptr<CChainState>& to_modify =
# 4829 [ + + ]: 946 : is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
# 4830 : :
# 4831 [ - + ]: 946 : if (to_modify) {
# 4832 : 0 : throw std::logic_error("should not be overwriting a chainstate");
# 4833 : 0 : }
# 4834 : 946 : to_modify.reset(new CChainState(mempool, m_blockman, *this, snapshot_blockhash));
# 4835 : :
# 4836 : : // Snapshot chainstates and initial IBD chaintates always become active.
# 4837 [ + + ][ + - ]: 946 : if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
# [ + - ]
# 4838 : 946 : LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
# 4839 : 946 : m_active_chainstate = to_modify.get();
# 4840 : 946 : } else {
# 4841 : 0 : throw std::logic_error("unexpected chainstate activation");
# 4842 : 0 : }
# 4843 : :
# 4844 : 946 : return *to_modify;
# 4845 : 946 : }
# 4846 : :
# 4847 : : const AssumeutxoData* ExpectedAssumeutxo(
# 4848 : : const int height, const CChainParams& chainparams)
# 4849 : 15 : {
# 4850 : 15 : const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
# 4851 : 15 : const auto assumeutxo_found = valid_assumeutxos_map.find(height);
# 4852 : :
# 4853 [ + + ]: 15 : if (assumeutxo_found != valid_assumeutxos_map.end()) {
# 4854 : 8 : return &assumeutxo_found->second;
# 4855 : 8 : }
# 4856 : 7 : return nullptr;
# 4857 : 15 : }
# 4858 : :
//! Construct a snapshot-based chainstate, populate it from the serialized
//! UTXO data in `coins_file` (described by `metadata`), and — if validation
//! succeeds — install it as the active chainstate. Returns false if a
//! snapshot chainstate was already activated or the snapshot fails
//! population/validation.
bool ChainstateManager::ActivateSnapshot(
        CAutoFile& coins_file,
        const SnapshotMetadata& metadata,
        bool in_memory)
{
    uint256 base_blockhash = metadata.m_base_blockhash;

    if (this->SnapshotBlockhash()) {
        LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
        return false;
    }

    int64_t current_coinsdb_cache_size{0};
    int64_t current_coinstip_cache_size{0};

    // Cache percentages to allocate to each chainstate.
    //
    // These particular percentages don't matter so much since they will only be
    // relevant during snapshot activation; caches are rebalanced at the conclusion of
    // this function. We want to give (essentially) all available cache capacity to the
    // snapshot to aid the bulk load later in this function.
    static constexpr double IBD_CACHE_PERC = 0.01;
    static constexpr double SNAPSHOT_CACHE_PERC = 0.99;

    {
        LOCK(::cs_main);
        // Resize the coins caches to ensure we're not exceeding memory limits.
        //
        // Allocate the majority of the cache to the incoming snapshot chainstate, since
        // (optimistically) getting to its tip will be the top priority. We'll need to call
        // `MaybeRebalanceCaches()` once we're done with this function to ensure
        // the right allocation (including the possibility that no snapshot was activated
        // and that we should restore the active chainstate caches to their original size).
        //
        current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
        current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;

        // Temporarily resize the active coins cache to make room for the newly-created
        // snapshot chain.
        this->ActiveChainstate().ResizeCoinsCaches(
            static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
            static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
    }

    auto snapshot_chainstate = WITH_LOCK(::cs_main,
        return std::make_unique<CChainState>(
            /*mempool=*/nullptr, m_blockman, *this, base_blockhash));

    {
        LOCK(::cs_main);
        snapshot_chainstate->InitCoinsDB(
            static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
            in_memory, false, "chainstate");
        snapshot_chainstate->InitCoinsCache(
            static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
    }

    const bool snapshot_ok = this->PopulateAndValidateSnapshot(
        *snapshot_chainstate, coins_file, metadata);

    if (!snapshot_ok) {
        // Population/validation failed: restore the original cache allocation
        // (the snapshot_chainstate unique_ptr is destroyed on return).
        WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
        return false;
    }

    {
        LOCK(::cs_main);
        assert(!m_snapshot_chainstate);
        m_snapshot_chainstate.swap(snapshot_chainstate);
        const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
        assert(chaintip_loaded);

        // The validated snapshot chainstate becomes the active one.
        m_active_chainstate = m_snapshot_chainstate.get();

        LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
        LogPrintf("[snapshot] (%.2f MB)\n",
            m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000));

        this->MaybeRebalanceCaches();
    }
    return true;
}
# 4941 : :
//! Flush the snapshot chainstate's coins cache to disk, logging how long the
//! flush takes. `snapshot_loaded` only selects the log message: the final
//! post-load flush vs. an intermediate flush during snapshot loading.
static void FlushSnapshotToDisk(CCoinsViewCache& coins_cache, bool snapshot_loaded)
{
    LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE(
        strprintf("%s (%.2f MB)",
                  snapshot_loaded ? "saving snapshot chainstate" : "flushing coins cache",
                  coins_cache.DynamicMemoryUsage() / (1000 * 1000)),
        BCLog::LogFlags::ALL);

    coins_cache.Flush();
}
# 4952 : :
//! Fill `snapshot_chainstate`'s coins view from the serialized UTXO set in
//! `coins_file` and verify that the resulting serialized hash matches the
//! hard-coded assumeutxo value for the snapshot's base block height. On
//! success, also fakes the nTx/nChainTx/status fields of block index entries
//! beneath the base block so the rest of the node treats them as
//! assumed-valid. Returns false on any malformed, truncated, or
//! hash-mismatched snapshot.
bool ChainstateManager::PopulateAndValidateSnapshot(
    CChainState& snapshot_chainstate,
    CAutoFile& coins_file,
    const SnapshotMetadata& metadata)
{
    // It's okay to release cs_main before we're done using `coins_cache` because we know
    // that nothing else will be referencing the newly created snapshot_chainstate yet.
    CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());

    uint256 base_blockhash = metadata.m_base_blockhash;

    CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));

    if (!snapshot_start_block) {
        // Needed for GetUTXOStats and ExpectedAssumeutxo to determine the height and to avoid a crash when base_blockhash.IsNull()
        LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
                  base_blockhash.ToString());
        return false;
    }

    int base_height = snapshot_start_block->nHeight;
    auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params());

    if (!maybe_au_data) {
        LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */
                  "(%d) - refusing to load snapshot\n", base_height);
        return false;
    }

    const AssumeutxoData& au_data = *maybe_au_data;

    COutPoint outpoint;
    Coin coin;
    const uint64_t coins_count = metadata.m_coins_count;
    uint64_t coins_left = metadata.m_coins_count;

    LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString());
    int64_t coins_processed{0};

    // Deserialize exactly `coins_count` (outpoint, coin) pairs into the cache.
    while (coins_left > 0) {
        try {
            coins_file >> outpoint;
            coins_file >> coin;
        } catch (const std::ios_base::failure&) {
            LogPrintf("[snapshot] bad snapshot format or truncated snapshot after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }
        if (coin.nHeight > base_height ||
            outpoint.n >= std::numeric_limits<decltype(outpoint.n)>::max() // Avoid integer wrap-around in coinstats.cpp:ApplyHash
        ) {
            LogPrintf("[snapshot] bad snapshot data after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }

        coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin));

        --coins_left;
        ++coins_processed;

        // Periodic progress log.
        if (coins_processed % 1000000 == 0) {
            LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
                coins_processed,
                static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count),
                coins_cache.DynamicMemoryUsage() / (1000 * 1000));
        }

        // Batch write and flush (if we need to) every so often.
        //
        // If our average Coin size is roughly 41 bytes, checking every 120,000 coins
        // means <5MB of memory imprecision.
        if (coins_processed % 120000 == 0) {
            if (ShutdownRequested()) {
                return false;
            }

            const auto snapshot_cache_state = WITH_LOCK(::cs_main,
                return snapshot_chainstate.GetCoinsCacheSizeState());

            if (snapshot_cache_state >= CoinsCacheSizeState::CRITICAL) {
                // This is a hack - we don't know what the actual best block is, but that
                // doesn't matter for the purposes of flushing the cache here. We'll set this
                // to its correct value (`base_blockhash`) below after the coins are loaded.
                coins_cache.SetBestBlock(GetRandHash());

                // No need to acquire cs_main since this chainstate isn't being used yet.
                FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/false);
            }
        }
    }

    // Important that we set this. This and the coins_cache accesses above are
    // sort of a layer violation, but either we reach into the innards of
    // CCoinsViewCache here or we have to invert some of the CChainState to
    // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
    // method.
    coins_cache.SetBestBlock(base_blockhash);

    bool out_of_coins{false};
    try {
        coins_file >> outpoint;
    } catch (const std::ios_base::failure&) {
        // We expect an exception since we should be out of coins.
        out_of_coins = true;
    }
    if (!out_of_coins) {
        // Trailing data means the advertised coin count was wrong.
        LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n",
            coins_count);
        return false;
    }

    LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
        coins_count,
        coins_cache.DynamicMemoryUsage() / (1000 * 1000),
        base_blockhash.ToString());

    // No need to acquire cs_main since this chainstate isn't being used yet.
    FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/true);

    assert(coins_cache.GetBestBlock() == base_blockhash);

    CCoinsStats stats{CoinStatsHashType::HASH_SERIALIZED};
    auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ };

    // As above, okay to immediately release cs_main here since no other context knows
    // about the snapshot_chainstate.
    CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());

    if (!GetUTXOStats(snapshot_coinsdb, m_blockman, stats, breakpoint_fnc)) {
        LogPrintf("[snapshot] failed to generate coins stats\n");
        return false;
    }

    // Assert that the deserialized chainstate contents match the expected assumeutxo value.
    if (AssumeutxoHash{stats.hashSerialized} != au_data.hash_serialized) {
        LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
            au_data.hash_serialized.ToString(), stats.hashSerialized.ToString());
        return false;
    }

    snapshot_chainstate.m_chain.SetTip(snapshot_start_block);

    // The remainder of this function requires modifying data protected by cs_main.
    LOCK(::cs_main);

    // Fake various pieces of CBlockIndex state:
    CBlockIndex* index = nullptr;

    // Don't make any modifications to the genesis block.
    // This is especially important because we don't want to erroneously
    // apply BLOCK_ASSUMED_VALID to genesis, which would happen if we didn't skip
    // it here (since it apparently isn't BLOCK_VALID_SCRIPTS).
    constexpr int AFTER_GENESIS_START{1};

    for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) {
        index = snapshot_chainstate.m_chain[i];

        // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
        // entries (among other things)
        if (!index->nTx) {
            index->nTx = 1;
        }
        // Fake nChainTx so that GuessVerificationProgress reports accurately
        index->nChainTx = index->pprev->nChainTx + index->nTx;

        // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid.
        if (!index->IsValid(BLOCK_VALID_SCRIPTS)) {
            // This flag will be removed once the block is fully validated by a
            // background chainstate.
            index->nStatus |= BLOCK_ASSUMED_VALID;
        }

        // Fake BLOCK_OPT_WITNESS so that CChainState::NeedsRedownload()
        // won't ask to rewind the entire assumed-valid chain on startup.
        if (DeploymentActiveAt(*index, ::Params().GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
            index->nStatus |= BLOCK_OPT_WITNESS;
        }

        m_blockman.m_dirty_blockindex.insert(index);
        // Changes to the block index will be flushed to disk after this call
        // returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is
        // called, since we've added a snapshot chainstate and therefore will
        // have to downsize the IBD chainstate, which will result in a call to
        // `FlushStateToDisk(ALWAYS)`.
    }

    assert(index);
    index->nChainTx = au_data.nChainTx;
    snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);

    LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
        coins_cache.DynamicMemoryUsage() / (1000 * 1000));
    return true;
}
# 5148 : :
# 5149 : : CChainState& ChainstateManager::ActiveChainstate() const
# 5150 : 6267686 : {
# 5151 : 6267686 : LOCK(::cs_main);
# 5152 : 6267686 : assert(m_active_chainstate);
# 5153 : 0 : return *m_active_chainstate;
# 5154 : 6267686 : }
# 5155 : :
# 5156 : : bool ChainstateManager::IsSnapshotActive() const
# 5157 : 3 : {
# 5158 : 3 : LOCK(::cs_main);
# 5159 [ + + ][ + - ]: 3 : return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
# 5160 : 3 : }
# 5161 : :
# 5162 : : void ChainstateManager::Unload()
# 5163 : 2106 : {
# 5164 : 2106 : AssertLockHeld(::cs_main);
# 5165 [ + + ]: 2106 : for (CChainState* chainstate : this->GetAll()) {
# 5166 : 2096 : chainstate->m_chain.SetTip(nullptr);
# 5167 : 2096 : chainstate->UnloadBlockIndex();
# 5168 : 2096 : }
# 5169 : :
# 5170 : 2106 : m_failed_blocks.clear();
# 5171 : 2106 : m_blockman.Unload();
# 5172 : 2106 : m_best_invalid = nullptr;
# 5173 : 2106 : }
# 5174 : :
# 5175 : : void ChainstateManager::MaybeRebalanceCaches()
# 5176 : 10 : {
# 5177 : 10 : AssertLockHeld(::cs_main);
# 5178 [ + - ][ + + ]: 10 : if (m_ibd_chainstate && !m_snapshot_chainstate) {
# 5179 : 7 : LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
# 5180 : : // Allocate everything to the IBD chainstate.
# 5181 : 7 : m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5182 : 7 : }
# 5183 [ + - ][ - + ]: 3 : else if (m_snapshot_chainstate && !m_ibd_chainstate) {
# 5184 : 0 : LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
# 5185 : : // Allocate everything to the snapshot chainstate.
# 5186 : 0 : m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
# 5187 : 0 : }
# 5188 [ + - ][ + - ]: 3 : else if (m_ibd_chainstate && m_snapshot_chainstate) {
# 5189 : : // If both chainstates exist, determine who needs more cache based on IBD status.
# 5190 : : //
# 5191 : : // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
# 5192 [ + + ]: 3 : if (m_snapshot_chainstate->IsInitialBlockDownload()) {
# 5193 : 1 : m_ibd_chainstate->ResizeCoinsCaches(
# 5194 : 1 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5195 : 1 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5196 : 1 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5197 : 2 : } else {
# 5198 : 2 : m_snapshot_chainstate->ResizeCoinsCaches(
# 5199 : 2 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
# 5200 : 2 : m_ibd_chainstate->ResizeCoinsCaches(
# 5201 : 2 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
# 5202 : 2 : }
# 5203 : 3 : }
# 5204 : 10 : }
|