// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <validation.h>

#include <arith_uint256.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <checkqueue.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/sigcache.h>
#include <script/standard.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <ui_interface.h>
#include <utilmoneystr.h>
#include <utilstrencodings.h>
#include <validationinterface.h>

#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
#define MICRO 0.000001
struct CBlockIndexWorkComparator
{
    bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};
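
// Note on the ordering above: in a std::set keyed by this comparator, lower
// chain work sorts earlier, so the entry with the most work (ties broken
// toward the earliest-received block, i.e. the lowest nSequenceId) compares
// greatest. Callers such as FindMostWorkChain() can therefore treat
// *setBlockIndexCandidates.rbegin() as the best candidate tip.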
enum DisconnectResult
{
    DISCONNECT_OK,      // All good.
    DISCONNECT_UNCLEAN, // Rolled back, but UTXO set was inconsistent with block.
    DISCONNECT_FAILED   // Something else went wrong.
};
/**
 * CChainState stores and provides an API to update our local knowledge of the
 * current best chain and header tree.
 *
 * It generally provides access to the current block tree, as well as functions
 * to provide new data, which it will appropriately validate and incorporate in
 * its state as necessary.
 *
 * Eventually, the API here is targeted at being exposed externally as a
 * consumable libconsensus library, so any functions added must only call
 * other class member functions, pure functions in other parts of the consensus
 * library, callbacks via the validation interface, or read/write-to-disk
 * functions (eventually this will also be via callbacks).
 */
class CChainState {
private:
    /**
     * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
     * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
     * missing the data for the block.
     */
    std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;

    /**
     * Every received block is assigned a unique and increasing identifier, so we
     * know which one to give priority in case of a fork.
     */
    CCriticalSection cs_nBlockSequenceId;
    /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
    int32_t nBlockSequenceId = 1;
    /** Decreasing counter (used by subsequent preciousblock calls). */
    int32_t nBlockReverseSequenceId = -1;
    /** chainwork for the last block that preciousblock has been applied to. */
    arith_uint256 nLastPreciousChainwork = 0;

    /** In order to efficiently track invalidity of headers, we keep the set of
      * blocks which we tried to connect and found to be invalid here (ie which
      * were set to BLOCK_FAILED_VALID since the last restart). We can then
      * walk this set and check if a new header is a descendant of something in
      * this set, preventing us from having to walk mapBlockIndex when we try
      * to connect a bad block and fail.
      *
      * While this is more complicated than marking everything which descends
      * from an invalid block as invalid at the time we discover it to be
      * invalid, doing so would require walking all of mapBlockIndex to find all
      * descendants. Since this case should be very rare, keeping track of all
      * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
      * well.
      *
      * Because we already walk mapBlockIndex in height-order at startup, we go
      * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
      * instead of putting things in this set.
      */
    std::set<CBlockIndex*> g_failed_blocks;

public:
    CChain chainActive;
    BlockMap mapBlockIndex;
    std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
    CBlockIndex *pindexBestInvalid = nullptr;

    bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree);

    bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock);

    bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex);
    bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock);

    // Block (dis)connection on a given view:
    DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view);
    bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                      CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false);

    // Block disconnection on our pcoinsTip:
    bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool);

    // Manual block validity manipulation:
    bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex);
    bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex);
    bool ResetBlockFailureFlags(CBlockIndex *pindex);

    bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
    bool RewindBlockIndex(const CChainParams& params);
    bool LoadGenesisBlock(const CChainParams& chainparams);

    void PruneBlockIndexCandidates();

    void UnloadBlockIndex();

private:
    bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace);
    bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool);

    CBlockIndex* AddToBlockIndex(const CBlockHeader& block);
    /** Create a new block index entry for a given block hash */
    CBlockIndex* InsertBlockIndex(const uint256& hash);
    void CheckBlockIndex(const Consensus::Params& consensusParams);

    void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state);
    CBlockIndex* FindMostWorkChain();
    bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams);

    bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params);
} g_chainstate;
CCriticalSection cs_main;

BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex;
CChain& chainActive = g_chainstate.chainActive;
CBlockIndex *pindexBestHeader = nullptr;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;

uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;

CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;

CBlockPolicyEstimator feeEstimator;
CTxMemPool mempool(&feeEstimator);

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const std::string strMessageMagic = "Bitcoin Signed Message:\n";

CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid;

/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 *  Pruned nodes may have entries where B is missing data.
 */
std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = g_chainstate.mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 *  block/undo files that should be deleted. Set on startup
 *  or if we allocate more file space when we're in prune mode.
 */
bool fCheckForPruning = false;

/** Dirty block index entries. */
std::set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
std::set<int> setDirtyFileInfo;
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    for (const uint256& hash : locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end())
        {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
            if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                return chain.Tip();
            }
        }
    }
    return chain.Genesis();
}

std::unique_ptr<CCoinsViewDB> pcoinsdbview;
std::unique_ptr<CCoinsViewCache> pcoinsTip;
std::unique_ptr<CBlockTreeDB> pblocktree;
enum FlushStateMode {
    FLUSH_STATE_NONE,
    FLUSH_STATE_IF_NEEDED,
    FLUSH_STATE_PERIODIC,
    FLUSH_STATE_ALWAYS
};

// See definition for documentation
static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 requires that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
bool TestLockPointValidity(const LockPoints* lp)
{
    AssertLockHeld(cs_main);
    assert(lp);

    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether chainActive is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!chainActive.Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);

    CBlockIndex* tip = chainActive.Tip();
    assert(tip != nullptr);

    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than chainActive.Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // pcoinsTip contains the UTXO set for chainActive.Tip()
        CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!viewMemPool.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
// Returns the script flags which should be checked for a given block
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);

static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
    int expired = pool.Expire(GetTime() - age);
    if (expired != 0) {
        LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
    }

    std::vector<COutPoint> vNoSpendsRemaining;
    pool.TrimToSize(limit, &vNoSpendsRemaining);
    for (const COutPoint& removed : vNoSpendsRemaining)
        pcoinsTip->Uncache(removed);
}
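
// Note: callers in this file pass the -maxmempool limit converted from MB to
// bytes (value * 1000000) and the -mempoolexpiry limit converted from hours to
// seconds (value * 60 * 60); see UpdateMempoolForReorg and
// AcceptToMemoryPoolWorker below for the call sites.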
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
    return strprintf("%s%s (code %i)",
        state.GetRejectReason(),
        state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
        state.GetRejectCode());
}
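
// Illustrative output (numbers made up for the example): a fee rejection is
// rendered as something like "mempool min fee not met, 100 < 1240 (code 66)",
// i.e. reject reason, optional debug message, then the state's reject code.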
static bool IsCurrentForFeeEstimation()
{
    AssertLockHeld(cs_main);
    if (IsInitialBlockDownload())
        return false;
    if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
        return false;
    if (chainActive.Height() < pindexBestHeader->nHeight - 1)
        return false;
    return true;
}
/* Make mempool consistent after a reorg, by re-adding or recursively erasing
 * disconnected block transactions from the mempool, and also removing any
 * other transactions from the mempool that are no longer valid given the new
 * tip/height.
 *
 * Note: we assume that disconnectpool only contains transactions that are NOT
 * confirmed in the current chain nor already in the mempool (otherwise,
 * in-mempool descendants of such transactions would be removed).
 *
 * Passing fAddToMempool=false will skip trying to add the transactions back,
 * and instead just erase from the mempool as needed.
 */
void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool)
{
    AssertLockHeld(cs_main);
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        CValidationState stateDummy;
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
                                nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (mempool.exists((*it)->GetHash())) {
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);

    // We also need to remove any now-immature transactions
    mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool& pool,
                 unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) {
    AssertLockHeld(cs_main);

    // pool.cs should be locked already, but go ahead and re-take the lock here
    // to enforce that mempool doesn't change between when we check the view
    // and when we actually call through to CheckInputs
    LOCK(pool.cs);

    assert(!tx.IsCoinBase());
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = view.AccessCoin(txin.prevout);

        // At this point we haven't actually checked if the coins are all
        // available (or shouldn't assume we have, since CheckInputs does).
        // So we just return failure if the inputs are not available here,
        // and then only have to check equivalence for available inputs.
        if (coin.IsSpent()) return false;

        const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
        if (txFrom) {
            assert(txFrom->GetHash() == txin.prevout.hash);
            assert(txFrom->vout.size() > txin.prevout.n);
            assert(txFrom->vout[txin.prevout.n] == coin.out);
        } else {
            const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
            assert(!coinFromDisk.IsSpent());
            assert(coinFromDisk.out == coin.out);
        }
    }

    return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
                                     bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                                     bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
{
    const CTransaction& tx = *ptx;
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, state))
        return false; // state filled in by CheckTransaction

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "coinbase");

    // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
    bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus());
    if (!gArgs.GetBoolArg("-prematurewitness", false) && tx.HasWitness() && !witnessEnabled) {
        return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
    }

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    std::string reason;
    if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
        return state.DoS(0, false, REJECT_NONSTANDARD, reason);

    // Only accept nLockTime-using transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
        return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");

    // is it already in the memory pool?
    if (pool.exists(hash)) {
        return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
    }

    // Check for conflicts with in-memory transactions
    std::set<uint256> setConflicts;
    {
    LOCK(pool.cs); // protect pool.mapNextTx
    for (const CTxIn &txin : tx.vin)
    {
        auto itConflicting = pool.mapNextTx.find(txin.prevout);
        if (itConflicting != pool.mapNextTx.end())
        {
            const CTransaction *ptxConflicting = itConflicting->second;
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
                // Allow opt-out of transaction replacement by setting
                // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
                //
                // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
                // non-replaceable transactions. All inputs rather than just one
                // is for the sake of multi-party protocols, where we don't
                // want a single party to be able to disable replacement.
                //
                // The opt-out ignores descendants as anyone relying on
                // first-seen mempool behavior should be checking all
                // unconfirmed ancestors anyway; doing otherwise is hopelessly
                // insecure.
                bool fReplacementOptOut = true;
                if (fEnableReplacement)
                {
                    for (const CTxIn &_txin : ptxConflicting->vin)
                    {
                        if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
                        {
                            fReplacementOptOut = false;
                            break;
                        }
                    }
                }
                if (fReplacementOptOut) {
                    return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
                }

                setConflicts.insert(ptxConflicting->GetHash());
            }
        }
    }
    }
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        LockPoints lp;
        {
        LOCK(pool.cs);
        CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
        view.SetBackend(viewMemPool);

        // do all inputs exist?
        for (const CTxIn txin : tx.vin) {
            if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
                coins_to_uncache.push_back(txin.prevout);
            }
            if (!view.HaveCoin(txin.prevout)) {
                // Are inputs missing because we already have the tx?
                for (size_t out = 0; out < tx.vout.size(); out++) {
                    // Optimistically just do efficient check of cache for outputs
                    if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
                        return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
                    }
                }
                // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
                if (pfMissingInputs) {
                    *pfMissingInputs = true;
                }
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
        }

        // Bring the best block into scope
        view.GetBestBlock();

        // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
        view.SetBackend(dummy);

        // Only accept BIP68 sequence locked transactions that can be mined in the next
        // block; we don't want our mempool filled up with transactions that can't
        // be mined yet.
        // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
        // CoinsViewCache instead of create its own
        if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
            return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");

        } // end LOCK(pool.cs)
        CAmount nFees = 0;
        if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
            return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (fRequireStandard && !AreInputsStandard(tx, view))
            return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");

        // Check for non-standard witness in P2WSH
        if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);

        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);

        // nModifiedFees includes any fee deltas from PrioritiseTransaction
        CAmount nModifiedFees = nFees;
        pool.ApplyDelta(hash, nModifiedFees);

        // Keep track of transactions that spend a coinbase, which we re-scan
        // during reorgs to ensure COINBASE_MATURITY is still met.
        bool fSpendsCoinbase = false;
        for (const CTxIn &txin : tx.vin) {
            const Coin &coin = view.AccessCoin(txin.prevout);
            if (coin.IsCoinBase()) {
                fSpendsCoinbase = true;
                break;
            }
        }

        CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, chainActive.Height(),
                              fSpendsCoinbase, nSigOpsCost, lp);
        unsigned int nSize = entry.GetTxSize();

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
            return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
                strprintf("%d", nSigOpsCost));

        CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
        if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
        }

        // No transactions are allowed below minRelayTxFee except from disconnected blocks
        if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
            return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met");
        }

        if (nAbsurdFee && nFees > nAbsurdFee)
            return state.Invalid(false,
                REJECT_HIGHFEE, "absurdly-high-fee",
                strprintf("%d > %d", nFees, nAbsurdFee));

        // Calculate in-mempool ancestors, up to a limit.
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
        }
        // A transaction that spends outputs that would be replaced by it is invalid. Now
        // that we have the set of all ancestors we can detect this
        // pathological case by making sure setConflicts and setAncestors don't
        // intersect.
        for (CTxMemPool::txiter ancestorIt : setAncestors)
        {
            const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
            if (setConflicts.count(hashAncestor))
            {
                return state.DoS(10, false,
                                 REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
                                 strprintf("%s spends conflicting transaction %s",
                                           hash.ToString(),
                                           hashAncestor.ToString()));
            }
        }

        // Check if it's economically rational to mine this transaction rather
        // than the ones it replaces.
        CAmount nConflictingFees = 0;
        size_t nConflictingSize = 0;
        uint64_t nConflictingCount = 0;
        CTxMemPool::setEntries allConflicting;

        // If we don't hold the lock allConflicting might be incomplete; the
        // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
        // mempool consistency for us.
        LOCK(pool.cs);
        const bool fReplacementTransaction = setConflicts.size();
        if (fReplacementTransaction)
        {
            CFeeRate newFeeRate(nModifiedFees, nSize);
            std::set<uint256> setConflictsParents;
            const int maxDescendantsToVisit = 100;
            CTxMemPool::setEntries setIterConflicting;
            for (const uint256 &hashConflicting : setConflicts)
            {
                CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
                if (mi == pool.mapTx.end())
                    continue;

                // Save these to avoid repeated lookups
                setIterConflicting.insert(mi);

                // Don't allow the replacement to reduce the feerate of the
                // mempool.
                //
                // We usually don't want to accept replacements with lower
                // feerates than what they replaced as that would lower the
                // feerate of the next block. Requiring that the feerate always
                // be increased is also an easy-to-reason about way to prevent
                // DoS attacks via replacements.
                //
                // The mining code doesn't (currently) take children into
                // account (CPFP) so we only consider the feerates of
                // transactions being directly replaced, not their indirect
                // descendants. While that does mean high feerate children are
                // ignored when deciding whether or not to replace, we do
                // require the replacement to pay more overall fees too,
                // mitigating most cases.
                CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
                if (newFeeRate <= oldFeeRate)
                {
                    return state.DoS(0, false,
                            REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                            strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                                  hash.ToString(),
                                  newFeeRate.ToString(),
                                  oldFeeRate.ToString()));
                }

                for (const CTxIn &txin : mi->GetTx().vin)
                {
                    setConflictsParents.insert(txin.prevout.hash);
                }

                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                for (CTxMemPool::txiter it : setIterConflicting) {
                    pool.CalculateDescendants(it, allConflicting);
                }
                for (CTxMemPool::txiter it : allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.DoS(0, false,
                        REJECT_NONSTANDARD, "too many potential replacements", false,
                        strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                            hash.ToString(),
                            nConflictingCount,
                            maxDescendantsToVisit));
            }

            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
                        return state.DoS(0, false,
                                         REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
                                         strprintf("replacement %s adds unconfirmed input, idx %d",
                                                  hash.ToString(), j));
                }
            }

            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.DoS(0, false,
                                 REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                                 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                                          hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }

            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
            {
                return state.DoS(0, false,
                        REJECT_INSUFFICIENTFEE, "insufficient fee", false,
                        strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                              hash.ToString(),
                              FormatMoney(nDeltaFees),
                              FormatMoney(::incrementalRelayFee.GetFee(nSize))));
            }
        }
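
        // Worked example of the two replacement fee rules above (illustrative
        // numbers, assuming the default incremental relay fee of 1000 sat/kvB):
        // replacing a single ~200-vbyte conflict that paid 1,000 sat with a
        // ~200-vbyte replacement requires (a) a feerate strictly above the old
        // 5 sat/vB, and (b) total fees of at least 1,000 + 200 = 1,200 sat, so
        // that the replacement also pays for its own relay bandwidth.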
        unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
        if (!chainparams.RequireStandard()) {
            scriptVerifyFlags = gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
        }

        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        PrecomputedTransactionData txdata(tx);
        if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) {
            // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
            // need to turn both off, and compare against just turning off CLEANSTACK
            // to see if the failure is specifically due to witness validation.
            CValidationState stateDummy; // Want reported failures to be from first CheckInputs
            if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
                !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
                // Only the witness is missing, so the transaction itself may be fine.
                state.SetCorruptionPossible();
            }
            return false; // state filled in by CheckInputs
        }

        // Check again against the current block tip's script verification
        // flags to cache our script execution flags. This is, of course,
        // useless if the next block has different script flags from the
        // previous one, but because the cache tracks script flags for us it
        // will auto-invalidate and we'll just have a few blocks of extra
        // misses on soft-fork activation.
        //
        // This is also useful in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks (using TestBlockValidity), however allowing such
        // transactions into the mempool can be exploited as a DoS attack.
        unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), Params().GetConsensus());
        if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata))
        {
            // If we're using promiscuousmempoolflags, we may hit this normally
            // Check if current block has some flags that scriptVerifyFlags
            // does not before printing an ominous warning
            if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
                return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against latest-block but not STANDARD flags %s, %s",
                    __func__, hash.ToString(), FormatStateMessage(state));
            } else {
                if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, false, txdata)) {
                    return error("%s: ConnectInputs failed against MANDATORY but not STANDARD flags due to promiscuous mempool %s, %s",
                        __func__, hash.ToString(), FormatStateMessage(state));
                } else {
                    LogPrintf("Warning: -promiscuousmempool flags set to not include currently enforced soft forks, this may break mining or otherwise cause instability!\n");
                }
            }
        }
        // Remove conflicting transactions from the mempool
        for (const CTxMemPool::txiter it : allConflicting)
        {
            LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
                    it->GetTx().GetHash().ToString(),
                    hash.ToString(),
                    FormatMoney(nModifiedFees - nConflictingFees),
                    (int)nSize - (int)nConflictingSize);
            if (plTxnReplaced)
                plTxnReplaced->push_back(it->GetSharedTx());
        }
        pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);

        // This transaction should only count for fee estimation if:
        // - it isn't a BIP 125 replacement transaction (may not be widely supported)
        // - it's not being readded during a reorg which bypasses typical mempool fee limits
        // - the node is not behind
        // - the transaction is not dependent on any other transactions in the mempool
        bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);

        // Store transaction in memory
        pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation);

        // trim mempool and check if tx was trimmed
        if (!bypass_limits) {
            LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
            if (!pool.exists(hash))
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
        }
    }

    GetMainSignals().TransactionAddedToMempool(ptx);

    return true;
}
/** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
                        bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                        bool bypass_limits, const CAmount nAbsurdFee)
{
    std::vector<COutPoint> coins_to_uncache;
    bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache);
    if (!res) {
        for (const COutPoint& hashTx : coins_to_uncache)
            pcoinsTip->Uncache(hashTx);
    }
    // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
    CValidationState stateDummy;
    FlushStateToDisk(chainparams, stateDummy, FLUSH_STATE_PERIODIC);
    return res;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
                        bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
                        bool bypass_limits, const CAmount nAbsurdFee)
{
    const CChainParams& chainparams = Params();
    return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee);
}
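
// Illustrative call site (a sketch, not copied from this file): RPC and wallet
// code typically submit a transaction along the lines of
//
//     CValidationState state;
//     bool fMissingInputs = false;
//     if (!AcceptToMemoryPool(mempool, state, MakeTransactionRef(std::move(tx)),
//                             &fMissingInputs, nullptr /* plTxnReplaced */,
//                             false /* bypass_limits */, maxTxFee)) {
//         // inspect state (via FormatStateMessage) and fMissingInputs on failure
//     }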
/**
 * Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock.
 * If blockIndex is provided, the transaction is fetched from the corresponding block.
 */
bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, bool fAllowSlow, CBlockIndex* blockIndex)
{
    CBlockIndex* pindexSlow = blockIndex;

    LOCK(cs_main);

    if (!blockIndex) {
        CTransactionRef ptx = mempool.get(hash);
        if (ptx) {
            txOut = ptx;
            return true;
        }

        if (fTxIndex) {
            CDiskTxPos postx;
            if (pblocktree->ReadTxIndex(hash, postx)) {
                CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
                if (file.IsNull())
                    return error("%s: OpenBlockFile failed", __func__);
                CBlockHeader header;
                try {
                    file >> header;
                    fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                    file >> txOut;
                } catch (const std::exception& e) {
                    return error("%s: Deserialize or I/O error - %s", __func__, e.what());
                }
                hashBlock = header.GetHash();
                if (txOut->GetHash() != hash)
                    return error("%s: txid mismatch", __func__);
                return true;
            }

            // transaction not found in index, nothing more can be done
            return false;
        }

        if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
            const Coin& coin = AccessByTxid(*pcoinsTip, hash);
            if (!coin.IsSpent()) pindexSlow = chainActive[coin.nHeight];
        }
    }

    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
            for (const auto& tx : block.vtx) {
                if (tx->GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk: OpenBlockFile failed");

    // Write index header
    unsigned int nSize = GetSerializeSize(fileout, block);
    fileout << FLATDATA(messageStart) << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk: ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    return true;
}
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
        return false;
    if (block.GetHash() != pindex->GetBlockHash())
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                pindex->ToString(), pindex->GetBlockPos().ToString());
    return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
    int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
    // Force block reward to zero when right shift is undefined.
    if (halvings >= 64)
        return 0;

    CAmount nSubsidy = 50 * COIN;
    // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
    nSubsidy >>= halvings;
    return nSubsidy;
}
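
// Worked example (mainnet, nSubsidyHalvingInterval = 210,000): heights
// 0..209,999 pay 50 BTC, heights 210,000..419,999 pay 25 BTC, heights
// 420,000..629,999 pay 12.5 BTC, and so on. After 64 halvings the right
// shift would be undefined behaviour, so the subsidy is clamped to zero first.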
bool IsInitialBlockDownload()
{
    // Once this function has returned false, it must remain false.
    static std::atomic<bool> latchToFalse{false};
    // Optimization: pre-test latch before taking the lock.
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    if (latchToFalse.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (chainActive.Tip() == nullptr)
        return true;
    if (chainActive.Tip()->nChainWork < nMinimumChainWork)
        return true;
    if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    latchToFalse.store(true, std::memory_order_relaxed);
    return false;
}
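
// Note: the latch only flips once every condition above clears in a single
// call. For example, with the default -maxtipage of 24 hours a node whose best
// block is more than a day old keeps reporting itself as being in initial
// block download even after its chain work has passed nMinimumChainWork.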
CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
    std::string strCmd = gArgs.GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    boost::thread t(runCommand, strCmd); // thread runs free
}
static void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = nullptr;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        if (!GetfLargeWorkForkFound() && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            SetfLargeWorkForkFound(true);
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            SetfLargeWorkInvalidChainFound(true);
        }
    }
    else
    {
        SetfLargeWorkForkFound(false);
        SetfLargeWorkInvalidChainFound(false);
    }
}
static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a condition where we should warn the user about as a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
      pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    assert(tip);
    LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
      tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
}
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        g_failed_blocks.insert(pindex);
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        for (const CTxIn &txin : tx.vin) {
            txundo.vprevout.emplace_back();
            bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
            assert(is_spent);
        }
    }
    // add outputs
    AddCoins(inputs, tx, nHeight);
}

void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
    CTxUndo txundo;
    UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
    return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
int GetSpendHeight(const CCoinsViewCache& inputs)
{
    LOCK(cs_main);
    CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
    return pindexPrev->nHeight + 1;
}
static CuckooCache::cache<uint256, SignatureCacheHasher> scriptExecutionCache;
static uint256 scriptExecutionCacheNonce(GetRandHash());

void InitScriptExecutionCache() {
    // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
    // setup_bytes creates the minimum possible cache (2 elements).
    size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
    size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize);
    LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
            (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
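
// Sizing example (assuming the default -maxsigcachesize of 32 MiB): half of
// that budget, 16 MiB, goes to this cache, which at 32 bytes per uint256
// entry gives room for roughly 512 Ki cached script-execution results.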
/**
 * Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
 * This does not modify the UTXO set.
 *
 * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
 * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
 * not pushed onto pvChecks/run.
 *
 * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
 * which are matched. This is useful for checking blocks where we will likely never need the cache
 * entry again.
 *
 * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
 */
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
    if (!tx.IsCoinBase())
    {
        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip script verification when connecting blocks under the
        // assumevalid block. Assuming the assumevalid block is valid this
        // is safe because block merkle hashes are still computed and checked,
        // Of course, if an assumed valid block is invalid due to false scriptSigs
        // this optimization would allow an invalid chain to be accepted.
        if (fScriptChecks) {
            // First check if script executions have been cached with the same
            // flags. Note that this assumes that the inputs provided are
            // correct (ie that the transaction hash which is in tx's prevouts
            // properly commits to the scriptPubKey in the inputs view of that
            // transaction).
            uint256 hashCacheEntry;
            // We only use the first 19 bytes of nonce to avoid a second SHA
            // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
            static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
            CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
            AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
            if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
                return true;
            }

            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const Coin& coin = inputs.AccessCoin(prevout);
                assert(!coin.IsSpent());

                // We very carefully only pass in things to CScriptCheck which
                // are clearly committed to by tx' witness hash. This provides
                // a sanity check that our caching is not introducing consensus
                // failures through additional data in, eg, the coins being
                // spent being checked as a part of CScriptCheck.

                // Verify signature
                CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
                if (pvChecks) {
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(coin.out, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after soft-fork
                    // super-majority signaling has occurred.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }

            if (cacheFullScriptStore && !pvChecks) {
                // We executed all of the provided scripts, and were told to
                // cache the result. Do so now.
                scriptExecutionCache.insert(hashCacheEntry);
            }
        }
    }

    return true;
}
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = GetSerializeSize(fileout, blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
static bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex *pindex)
{
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }

    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Read block
    uint256 hashChecksum;
    CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
    try {
        verifier << pindex->pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash())
        return error("%s: Checksum mismatch", __func__);

    return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    SetMiscWarning(strMessage);
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}

bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
/**
 * Restore the UTXO in a Coin at a given COutPoint
 * @param undo The Coin to be restored.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return A DisconnectResult as an int
 */
int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    if (view.HaveCoin(out)) fClean = false; // overwriting transaction output

    if (undo.nHeight == 0) {
        // Missing undo metadata (height and coinbase). Older versions included this
        // information only in undo records for the last spend of a transaction's
        // outputs. This implies that it must be present for some other output of the same tx.
        const Coin& alternate = AccessByTxid(view, out.hash);
        if (!alternate.IsSpent()) {
            undo.nHeight = alternate.nHeight;
            undo.fCoinBase = alternate.fCoinBase;
        } else {
            return DISCONNECT_FAILED; // adding output for transaction without known metadata
        }
    }
    // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
    // sure that the coin did not already exist in the cache. As we have queried for that above
    // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
    // it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
 *  When FAILED is returned, view is left in an indeterminate state. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
    bool fClean = true;

    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DISCONNECT_FAILED;
    }

    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DISCONNECT_FAILED;
    }

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();
        bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
                COutPoint out(hash, o);
                Coin coin;
                bool is_spent = view.SpendCoin(out, &coin);
                if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
                    fClean = false; // transaction output mismatch
                }
            }
        }

        // restore inputs
        if (i > 0) { // not coinbases
            CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size()) {
                error("DisconnectBlock(): transaction and undo data inconsistent");
                return DISCONNECT_FAILED;
            }
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
                if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
                fClean = fClean && res != DISCONNECT_UNCLEAN;
            }
            // At this point, all of txundo.vprevout should have been moved out.
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE *fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull()) {
        CDiskBlockPos _pos;
        if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
            return error("ConnectBlock(): FindUndoPos failed");
        if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
            return AbortNode(state, "Failed to write undo data");

        // update nUndoPos in block index
        pindex->nUndoPos = _pos.nPos;
        pindex->nStatus |= BLOCK_HAVE_UNDO;
        setDirtyBlockIndex.insert(pindex);
    }

    return true;
}
static bool WriteTxIndexDataForBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex)
{
    if (!fTxIndex) return true;

    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    for (const CTransactionRef& tx : block.vtx)
    {
        vPos.push_back(std::make_pair(tx->GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
    }

    if (!pblocktree->WriteTxIndex(vPos)) {
        return AbortNode(state, "Failed to write transaction index");
    }

    return true;
}

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}

// Protected by cs_main
VersionBitsCache versionbitscache;
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    int32_t nVersion = VERSIONBITS_TOP_BITS;

    for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
        ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
        if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
            nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
        }
    }

    return nVersion;
}
/**
 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
 */
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
    int bit;

public:
    explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}

    int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
    int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
    int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
    int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }

    bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
    {
        return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
               ((pindex->nVersion >> bit) & 1) != 0 &&
               ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
    }
};

// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) {
    AssertLockHeld(cs_main);

    unsigned int flags = SCRIPT_VERIFY_NONE;

    // Start enforcing P2SH (BIP16)
    if (pindex->nHeight >= consensusparams.BIP16Height) {
        flags |= SCRIPT_VERIFY_P2SH;
    }

    // Start enforcing the DERSIG (BIP66) rule
    if (pindex->nHeight >= consensusparams.BIP66Height) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
    if (pindex->nHeight >= consensusparams.BIP65Height) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
    if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
    }

    // Start enforcing WITNESS rules using versionbits logic.
    if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
        flags |= SCRIPT_VERIFY_WITNESS;
        flags |= SCRIPT_VERIFY_NULLDUMMY;
    }

    return flags;
}
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
 *  Validity checks that depend on the UTXO set are also done; ConnectBlock()
 *  can fail if those validity checks fail (among other reasons). */
bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
    AssertLockHeld(cs_main);

    // pindex->phashBlock can be null if called by CreateNewBlock/TestBlockValidity
    assert((pindex->phashBlock == nullptr) ||
           (*pindex->phashBlock == block.GetHash()));
    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
    // ContextualCheckBlockHeader() here. This means that if we add a new
    // consensus rule that is enforced in one of those two functions, then we
    // may have let in a block that violates the rule prior to updating the
    // software, and we would NOT be enforcing the rule here. Fully solving
    // upgrade from one software version to the next after a consensus rule
    // change is potentially tricky and issue-specific (see RewindBlockIndex()
    // for one general approach that was used for BIP 141 deployment).
    // Also, currently the rule against blocks more than 2 hours in the future
    // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
    // re-enforce that rule here (at least until we make it impossible for
    // GetAdjustedTime() to go backward).
    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    nBlocksTotal++;

    bool fScriptChecks = true;
    if (!hashAssumeValid.IsNull()) {
        // We've been configured with the hash of a block which has been externally verified to have a valid history.
        // A suitable default value is included with the software and updated from time to time. Because validity
        // relative to a piece of software is an objective fact these defaults can be easily reviewed.
        // This setting doesn't force the selection of any particular chain but makes validating some faster by
        // effectively caching the result of part of the verification.
        BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
        if (it != mapBlockIndex.end()) {
            if (it->second->GetAncestor(pindex->nHeight) == pindex &&
                pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
                pindexBestHeader->nChainWork >= nMinimumChainWork) {
                // This block is a member of the assumed verified chain and an ancestor of the best header.
                // The equivalent time check discourages hash power from extorting the network via DOS attack
                // into accepting an invalid block through telling users they must manually set assumevalid.
                // Requiring a software change or burying the invalid block, regardless of the setting, makes
                // it hard to hide the implication of the demand. This also avoids having release candidates
                // that are hardly doing any signature verification at all in testing without having to
                // artificially set the default assumed verified block further back.
                // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
                // least as good as the expected chain.
                fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
            }
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
                          !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                           (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
    assert(pindex->pprev);
    CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
    //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));

    if (fEnforceBIP30) {
        for (const auto& tx : block.vtx) {
            for (size_t o = 0; o < tx->vout.size(); o++) {
                if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
                    return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                                     REJECT_INVALID, "bad-txns-BIP30");
                }
            }
        }
    }

    // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Get the script flags for this block
    unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);

    CBlockUndo blockundo;

    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr);

    std::vector<int> prevheights;
    CAmount nFees = 0;
    int nInputs = 0;
    int64_t nSigOpsCost = 0;
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    std::vector<PrecomputedTransactionData> txdata;
    txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = *(block.vtx[i]);

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase())
        {
            CAmount txfee = 0;
            if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
                return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
            }
            nFees += txfee;
            if (!MoneyRange(nFees)) {
                return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__),
                                 REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
            }

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
            }

            if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                 REJECT_INVALID, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCost counts 3 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        // * witness (when witness enabled in flags and excludes coinbase)
        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        txdata.emplace_back(tx);
        if (!tx.IsCoinBase())
        {
            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
                return error("ConnectBlock(): CheckInputs on %s failed with %s",
                    tx.GetHash().ToString(), FormatStateMessage(state));
            control.Add(vChecks);
        }

        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0]->GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0]->GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    if (!control.Wait())
        return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);

    if (fJustCheck)
        return true;

    if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
        return false;

    if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (!WriteTxIndexDataForBlock(block, state, pindex))
        return false;

    assert(pindex->phashBlock);
    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);

    int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
    LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);

    return true;
}
/**
 * Update the on-disk chain state.
 * The caches and indexes are flushed depending on the mode we're called with
 * if they're too large, if it's been a while since the last write,
 * or always and in all cases if we're in prune mode and are deleting files.
 */
bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) {
    int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
    LOCK(cs_main);
    static int64_t nLastWrite = 0;
    static int64_t nLastFlush = 0;
    static int64_t nLastSetChain = 0;
    std::set<int> setFilesToPrune;
    bool fFlushForPrune = false;
    bool fDoFullFlush = false;
    int64_t nNow = 0;
    try {
    {
        LOCK(cs_LastBlockFile);
        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
            if (nManualPruneHeight > 0) {
                FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
            } else {
                FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
                fCheckForPruning = false;
            }
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                if (!fHavePruned) {
                    pblocktree->WriteFlag("prunedblockfiles", true);
                    fHavePruned = true;
                }
            }
        }
        nNow = GetTimeMicros();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush == 0) {
            nLastFlush = nNow;
        }
        if (nLastSetChain == 0) {
            nLastSetChain = nNow;
        }
        int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
        int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
        int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Depend on nMinDiskSpace to ensure we can write block index
            if (!CheckDiskSpace(0))
                return state.Error("out of disk space");
            // First make sure all block and undo data is flushed to disk.
            FlushBlockFile();
            // Then update all block file information (which may refer to block and undo files).
            {
                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune)
                UnlinkPrunedFiles(setFilesToPrune);
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush) {
            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
                return state.Error("out of disk space");
            // Flush the chainstate (which may refer to block index entries).
            if (!pcoinsTip->Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
        }
    }
    if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().SetBestChain(chainActive.GetLocator());
        nLastSetChain = nNow;
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
void FlushStateToDisk() {
    CValidationState state;
    const CChainParams& chainparams = Params();
    FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS);
}

void PruneAndFlush() {
    CValidationState state;
    fCheckForPruning = true;
    const CChainParams& chainparams = Params();
    FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE);
}
static void DoWarning(const std::string& strWarning)
{
    static bool fWarned = false;
    SetMiscWarning(strWarning);
    if (!fWarned) {
        AlertNotify(strWarning);
        fWarned = true;
    }
}
/** Check warning conditions and do some notifications on new chain tip set. */
void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainParams) {
    mempool.AddTransactionsUpdated(1);

    cvBlockChange.notify_all();

    std::vector<std::string> warningMessages;
    if (!IsInitialBlockDownload())
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = pindexNew;
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                if (state == THRESHOLD_ACTIVE) {
                    DoWarning(strWarning);
                } else {
                    warningMessages.push_back(strWarning);
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != nullptr; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            warningMessages.push_back(strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
        if (nUpgraded > 100/2)
        {
            std::string strWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
            // notify GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            DoWarning(strWarning);
        }
    }
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
      log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()),
      GuessVerificationProgress(chainParams.TxData(), pindexNew), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
    if (!warningMessages.empty())
        LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", "));
    LogPrintf("\n");
}
/** Disconnect chainActive's tip.
  * After calling, the mempool will be in an inconsistent state, with
  * transactions from disconnected blocks being added to disconnectpool. You
  * should make the mempool consistent again by calling UpdateMempoolForReorg,
  * with cs_main held.
  *
  * If disconnectpool is nullptr, then no disconnected transactions are added to
  * disconnectpool (note that the caller is responsible for mempool consistency
  * in any case).
  */
bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
{
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(pcoinsTip.get());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED))
        return false;

    if (disconnectpool) {
        // Save transactions to re-add to mempool at end of reorg
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
            disconnectpool->removeEntry(it);
        }
    }

    chainActive.SetTip(pindexDelete->pprev);

    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock);
    return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;

struct PerBlockConnectTrace {
    CBlockIndex* pindex = nullptr;
    std::shared_ptr<const CBlock> pblock;
    std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
    PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
};
/**
 * Used to track blocks whose transactions were applied to the UTXO state as a
 * part of a single ActivateBestChainStep call.
 *
 * This class also tracks transactions that are removed from the mempool as
 * conflicts (per block) and can be used to pass all those transactions
 * through SyncTransaction.
 *
 * This class assumes (and asserts) that the conflicted transactions for a given
 * block are added via mempool callbacks prior to the BlockConnected() associated
 * with those transactions. If any transactions are marked conflicted, it is
 * assumed that an associated block will always be added.
 *
 * This class is single-use, once you call GetBlocksConnected() you have to throw
 * it away and make a new one.
 */
class ConnectTrace {
private:
    std::vector<PerBlockConnectTrace> blocksConnected;
    CTxMemPool &pool;

public:
    explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
        pool.NotifyEntryRemoved.connect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
    }

    ~ConnectTrace() {
        pool.NotifyEntryRemoved.disconnect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
    }

    void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
        assert(!blocksConnected.back().pindex);
        blocksConnected.back().pindex = pindex;
        blocksConnected.back().pblock = std::move(pblock);
        blocksConnected.emplace_back();
    }

    std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
        // We always keep one extra block at the end of our list because
        // blocks are added after all the conflicted transactions have
        // been filled in. Thus, the last entry should always be an empty
        // one waiting for the transactions from the next block. We pop
        // the last entry here to make sure the list we return is sane.
        assert(!blocksConnected.back().pindex);
        assert(blocksConnected.back().conflictedTxs->empty());
        blocksConnected.pop_back();
        return blocksConnected;
    }

    void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) {
        assert(!blocksConnected.back().pindex);
        if (reason == MemPoolRemovalReason::CONFLICT) {
            blocksConnected.back().conflictedTxs->emplace_back(std::move(txRemoved));
        }
    }
};
/**
 * Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
 *
 * The block is added to connectTrace if connection succeeds.
 */
bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
    assert(pindexNew->pprev == chainActive.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pthisBlock = pblockNew;
    } else {
        pthisBlock = pblock;
    }
    const CBlock& blockConnecting = *pthisBlock;
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
    {
        CCoinsViewCache view(pcoinsTip.get());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
        bool flushed = view.Flush();
        assert(flushed);
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
    // Remove conflicting transactions from the mempool.
    mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
    disconnectpool.removeForBlock(blockConnecting.vtx);
    // Update chainActive & related variables.
    chainActive.SetTip(pindexNew);
    UpdateTip(pindexNew, chainparams);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);

    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
CBlockIndex* CChainState::FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = nullptr;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return nullptr;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
2436 void CChainState::PruneBlockIndexCandidates() {
2437 // Note that we can't delete the current block itself, as we may need to return to it later in case a
2438 // reorganization to a better block fails.
2439 std::set
<CBlockIndex
*, CBlockIndexWorkComparator
>::iterator it
= setBlockIndexCandidates
.begin();
2440 while (it
!= setBlockIndexCandidates
.end() && setBlockIndexCandidates
.value_comp()(*it
, chainActive
.Tip())) {
2441 setBlockIndexCandidates
.erase(it
++);
2443 // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2444 assert(!setBlockIndexCandidates
.empty());
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
 */
bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            UpdateMempoolForReorg(disconnectpool, false);
            return false;
        }
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    UpdateMempoolForReorg(disconnectpool, false);
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        UpdateMempoolForReorg(disconnectpool, true);
    }
    mempool.check(pcoinsTip.get());

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
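// The batching above bounds how many block indexes are queued per pass: each
// iteration of the outer while loop considers at most 32 blocks beyond the
// current fork point. For a reorg from a fork point at height 1000 toward a
// most-work tip at height 1100, nTargetHeight takes the values 1032, 1064,
// 1096 and finally 1100 (unless the loop exits earlier because the new tip
// already has more work than the old one, in which case ActivateBestChain()
// simply calls back into this function on its next iteration).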
static void NotifyHeaderTip() {
    bool fNotify = false;
    bool fInitialBlockDownload = false;
    static CBlockIndex* pindexHeaderOld = nullptr;
    CBlockIndex* pindexHeader = nullptr;
    {
        LOCK(cs_main);
        pindexHeader = pindexBestHeader;

        if (pindexHeader != pindexHeaderOld) {
            fNotify = true;
            fInitialBlockDownload = IsInitialBlockDownload();
            pindexHeaderOld = pindexHeader;
        }
    }
    // Send block tip changed notifications without cs_main
    if (fNotify) {
        uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
    }
}
/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either nullptr or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(cs_main);

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        boost::this_thread::interruption_point();

        if (GetMainSignals().CallbacksPending() > 10) {
            // Block until the validation queue drains. This should largely
            // never happen in normal operation, however may happen during
            // reindex, causing memory blowup if we run too far ahead.
            SyncWithValidationInterfaceQueue();
        }

        if (ShutdownRequested())
            break;

        const CBlockIndex *pindexFork;
        bool fInitialDownload;
        {
            LOCK(cs_main);
            ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked

            CBlockIndex *pindexOldTip = chainActive.Tip();
            if (pindexMostWork == nullptr) {
                pindexMostWork = FindMostWorkChain();
            }

            // Whether we have anything to do at all.
            if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip())
                return true;

            bool fInvalidFound = false;
            std::shared_ptr<const CBlock> nullBlockPtr;
            if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace))
                return false;

            if (fInvalidFound) {
                // Wipe cache, we may need another branch now.
                pindexMostWork = nullptr;
            }
            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();

            for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                assert(trace.pblock && trace.pindex);
                GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main

        // Notify external listeners about the new tip.
        GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

        // Always notify the UI if a new block tip was connected
        if (pindexFork != pindexNewTip) {
            uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
        }

        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}

bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    return g_chainstate.ActivateBestChain(state, chainparams, std::move(pblock));
}
bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = chainActive.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}

bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
    return g_chainstate.PreciousBlock(state, params, pindex);
}
bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    AssertLockHeld(cs_main);

    // We first disconnect backwards and then mark the blocks as invalid.
    // This prevents a case where pruned nodes may fail to invalidateblock
    // and be left unable to start as they have no tip candidates (as there
    // are no blocks that meet the "have data and are not invalid per
    // nStatus" criteria for inclusion in setBlockIndexCandidates).

    bool pindex_was_in_chain = false;
    CBlockIndex *invalid_walk_tip = chainActive.Tip();

    DisconnectedBlockTransactions disconnectpool;
    while (chainActive.Contains(pindex)) {
        pindex_was_in_chain = true;
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state, chainparams, &disconnectpool)) {
            // It's probably hopeless to try to make the mempool consistent
            // here if DisconnectTip failed, but we can try.
            UpdateMempoolForReorg(disconnectpool, false);
            return false;
        }
    }

    // Now mark the blocks we just disconnected as descendants invalid
    // (note this may not be all descendants).
    while (pindex_was_in_chain && invalid_walk_tip != pindex) {
        invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(invalid_walk_tip);
        setBlockIndexCandidates.erase(invalid_walk_tip);
        invalid_walk_tip = invalid_walk_tip->pprev;
    }

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);
    g_failed_blocks.insert(pindex);

    // DisconnectTip will add transactions to disconnectpool; try to add these
    // back to the mempool.
    UpdateMempoolForReorg(disconnectpool, true);

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add it again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
    return true;
}

bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
    return g_chainstate.InvalidateBlock(state, chainparams, pindex);
}
bool CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = nullptr;
            }
            g_failed_blocks.erase(it->second);
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != nullptr) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        }
        pindex = pindex->pprev;
    }
    return true;
}

bool ResetBlockFailureFlags(CBlockIndex *pindex) {
    return g_chainstate.ResetBlockFailureFlags(pindex);
}
CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool CChainState::ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
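// Illustrative sketch (hedged): the nChainTx bookkeeping above simply accumulates
// each block's transaction count along the chain, so for any linkable index the
// relation below is expected to hold. Hypothetical helper, not part of the file.
//
//     static bool CheckChainTxAccumulation(const CBlockIndex* pindex)
//     {
//         // nChainTx == parent's nChainTx + this block's nTx (0 means "not yet linkable")
//         unsigned int parent = pindex->pprev ? pindex->pprev->nChainTx : 0;
//         return pindex->nChainTx == 0 || pindex->nChainTx == parent + pindex->nTx;
//     }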
static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
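// Illustrative sketch (hedged): the chunk arithmetic above rounds file sizes up
// to a chunk boundary, so pre-allocation only happens when a write crosses into
// a new BLOCKFILE_CHUNK_SIZE / UNDOFILE_CHUNK_SIZE chunk. Hypothetical helper,
// not part of the original file.
//
//     static unsigned int ChunksFor(unsigned int nBytes, unsigned int nChunkSize)
//     {
//         return (nBytes + nChunkSize - 1) / nChunkSize; // ceiling division
//     }
//     // e.g. a file one byte larger than one chunk occupies two chunks, which
//     // is exactly when the pre-allocation branch above fires.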
static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
{
    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
        return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");

    return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");

    // Check transactions
    for (const auto& tx : block.vtx)
        if (!CheckTransaction(*tx, state, false))
            return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));

    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");

    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
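// Illustrative sketch (hedged): a context-free sanity check of a block that was
// deserialized elsewhere, e.g. in a test harness. Hypothetical caller code.
//
//     CValidationState state;
//     if (!CheckBlock(block, state, Params().GetConsensus(),
//                     /*fCheckPOW=*/true, /*fCheckMerkleRoot=*/true)) {
//         LogPrintf("CheckBlock failed: %s\n", FormatStateMessage(state));
//     }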
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    LOCK(cs_main);
    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE);
}
// Compute at which vout of the block's coinbase transaction the witness
// commitment occurs, or -1 if not found.
static int GetWitnessCommitmentIndex(const CBlock& block)
{
    int commitpos = -1;
    if (!block.vtx.empty()) {
        for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
            if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
                commitpos = o;
            }
        }
    }
    return commitpos;
}
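// Illustrative sketch (hedged): the six bytes matched above are the BIP141
// witness commitment header: OP_RETURN, a 0x24 (36-byte) push, then the magic
// bytes 0xaa21a9ed. Hypothetical constant shown only for clarity.
//
//     static const unsigned char WITNESS_COMMITMENT_HEADER[6] =
//         {0x6a /*OP_RETURN*/, 0x24, 0xaa, 0x21, 0xa9, 0xed};
//     // A scriptPubKey carrying the commitment is therefore at least 6 + 32 = 38 bytes.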
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    int commitpos = GetWitnessCommitmentIndex(block);
    static const std::vector<unsigned char> nonce(32, 0x00);
    if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
        CMutableTransaction tx(*block.vtx[0]);
        tx.vin[0].scriptWitness.stack.resize(1);
        tx.vin[0].scriptWitness.stack[0] = nonce;
        block.vtx[0] = MakeTransactionRef(std::move(tx));
    }
}
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    std::vector<unsigned char> ret(32, 0x00);
    if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
            CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            out.scriptPubKey.resize(38);
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            CMutableTransaction tx(*block.vtx[0]);
            tx.vout.push_back(out);
            block.vtx[0] = MakeTransactionRef(std::move(tx));
        }
    }
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
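// Illustrative sketch (hedged): the committed value is SHA256d(witness merkle
// root || 32-byte witness nonce), as computed above with CHash256. Hypothetical
// standalone recomputation, assuming the all-zero nonce used for a fresh
// coinbase.
//
//     uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
//     std::vector<unsigned char> nonce(32, 0x00);
//     uint256 commitment_hash;
//     CHash256().Write(witnessroot.begin(), 32)
//               .Write(nonce.data(), 32)
//               .Finalize(commitment_hash.begin());
//     // commitment_hash should equal bytes [6, 38) of the commitment scriptPubKey.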
/** Context-dependent validity checks.
 *  By "context", we mean only the previous block headers, but not the UTXO
 *  set; UTXO-related validity checks are done in ConnectBlock().
 *  NOTE: This function is not currently invoked by ConnectBlock(), so we
 *  should consider upgrade issues if we change which consensus rules are
 *  enforced in this function (eg by adding a new consensus rule). See comment
 *  in ConnectBlock().
 *  Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;

    // Check proof of work
    const Consensus::Params& consensusParams = params.GetConsensus();
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");

    // Check against checkpoints
    if (fCheckpointsEnabled) {
        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
        // MapBlockIndex.
        CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight)
            return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
    }

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");

    // Check timestamp
    if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
        return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades
    if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
            return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
                                 strprintf("rejected nVersion=0x%08x block", block.nVersion));

    return true;
}
/** NOTE: This function is not currently invoked by ConnectBlock(), so we
 *  should consider upgrade issues if we change which consensus rules are
 *  enforced in this function (eg by adding a new consensus rule). See comment
 *  in ConnectBlock().
 *  Note that -reindex-chainstate skips the validation that happens here!
 */
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past) using versionbits logic.
    int nLockTimeFlags = 0;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__));
            }
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness nonce and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
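// Illustrative sketch (hedged): the BIP34 rule enforced above requires the
// coinbase scriptSig to begin with the serialized block height. Hypothetical
// stand-alone check mirroring the comparison in ContextualCheckBlock.
//
//     CScript expect = CScript() << nHeight;              // the expected height push
//     const CScript& sig = block.vtx[0]->vin[0].scriptSig;
//     bool ok = sig.size() >= expect.size() &&
//               std::equal(expect.begin(), expect.end(), sig.begin());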
bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = nullptr;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {

        if (miSelf != mapBlockIndex.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK)
                return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
            return true;
        }

        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
        if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));

        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            for (const CBlockIndex* failedit : g_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
                }
            }
        }
    }
    if (pindex == nullptr)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
// Exposed wrapper for AcceptBlockHeader
bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid)
{
    if (first_invalid != nullptr) first_invalid->SetNull();
    {
        LOCK(cs_main);
        for (const CBlockHeader& header : headers) {
            CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
            if (!g_chainstate.AcceptBlockHeader(header, state, chainparams, &pindex)) {
                if (first_invalid) *first_invalid = header;
                return false;
            }
            if (ppindex) {
                *ppindex = pindex;
            }
        }
    }
    NotifyHeaderTip();
    return true;
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
static CDiskBlockPos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const CDiskBlockPos* dbp) {
    unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
    CDiskBlockPos blockPos;
    if (dbp != nullptr)
        blockPos = *dbp;
    if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
        error("%s: FindBlockPos failed", __func__);
        return CDiskBlockPos();
    }
    if (dbp == nullptr) {
        if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
            AbortNode("Failed to write block");
            return CDiskBlockPos();
        }
    }
    return blockPos;
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
    const CBlock& block = *pblock;

    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = nullptr;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;    // This is a previously-processed block that was pruned
        if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
        if (fTooFarAhead) return true;        // Block height is too high

        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < nMinimumChainWork) return true;
    }

    if (fNewBlock) *fNewBlock = true;

    if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, FormatStateMessage(state));
    }

    // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
    // (but if it does not build on our best tip, let the SendMessages loop relay it)
    if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev)
        GetMainSignals().NewPoWValidBlock(pindex, pblock);

    // Write block to history file
    try {
        CDiskBlockPos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
        if (blockPos.IsNull()) {
            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
            return false;
        }
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
            return error("AcceptBlock(): ReceivedBlockTransactions failed");
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    if (fCheckForPruning)
        FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); // we just allocated more disk space for block files

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
    AssertLockNotHeld(cs_main);

    {
        CBlockIndex *pindex = nullptr;
        if (fNewBlock) *fNewBlock = false;
        CValidationState state;
        // Ensure that CheckBlock() passes before calling AcceptBlock, as
        // belt-and-suspenders.
        bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());

        LOCK(cs_main);

        if (ret) {
            // Store to disk
            ret = g_chainstate.AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
        }
        if (!ret) {
            GetMainSignals().BlockChecked(*pblock, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__, state.GetDebugMessage());
        }
    }

    NotifyHeaderTip();

    CValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!g_chainstate.ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
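// Illustrative sketch (hedged): how a block arriving from the network or from a
// submitblock-style RPC might be handed to validation. The CBlock `block` is
// assumed to have been deserialized already.
//
//     std::shared_ptr<const CBlock> shared_block = std::make_shared<const CBlock>(block);
//     bool fNewBlock = false;
//     if (!ProcessNewBlock(Params(), shared_block, /*fForceProcessing=*/true, &fNewBlock)) {
//         LogPrintf("Block %s was rejected\n", shared_block->GetHash().ToString());
//     }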
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == chainActive.Tip());
    CCoinsViewCache viewNew(pcoinsTip.get());
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
    if (!g_chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
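// Illustrative sketch (hedged): the kind of check a miner runs on a freshly
// assembled block template before handing it out, with the PoW and merkle-root
// checks disabled because neither is filled in yet. `block` is assumed to be a
// candidate block building on the current tip.
//
//     LOCK(cs_main);
//     CValidationState state;
//     if (!TestBlockValidity(state, Params(), block, chainActive.Tip(),
//                            /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
//         return error("CreateNewBlock: TestBlockValidity failed: %s", FormatStateMessage(state));
//     }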
/**
 * BLOCK PRUNING CODE
 */

/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
    LOCK(cs_LastBlockFile);

    uint64_t retval = 0;
    for (const CBlockFileInfo &file : vinfoBlockFile) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
    LOCK(cs_LastBlockFile);

    for (const auto& entry : mapBlockIndex) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    mapBlocksUnlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
{
    for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        CDiskBlockPos pos(*it, 0);
        fs::remove(GetBlockPosFilename(pos, "blk"));
        fs::remove(GetBlockPosFilename(pos, "rev"));
        LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}
/* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight)
{
    assert(fPruneMode && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == nullptr)
        return;

    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
    int count = 0;
    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
            continue;
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(int nManualPruneHeight)
{
    CValidationState state;
    const CChainParams& chainparams = Params();
    FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight);
}
/**
 * Prune block and undo files (blk???.dat and undo???.dat) so that the disk space used is less than a user-defined target.
 * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
 * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
 * (which in this case means the blockchain must be re-downloaded.)
 *
 * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
 * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
 * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
 * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
 * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
 * A db flag records the fact that at least some block files have been pruned.
 *
 * @param[out]   setFilesToPrune   The set of file indices that can be unlinked will be returned
 */
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget)  // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}
static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return nullptr;
    fs::path path = GetBlockPosFilename(pos, prefix);
    fs::create_directories(path.parent_path());
    FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+");
    if (!file && !fReadOnly)
        file = fsbridge::fopen(path, "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return nullptr;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return nullptr;
        }
    }
    return file;
}
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}

/** Open an undo file (rev?????.dat) */
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}
fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
{
    if (hash.IsNull())
        return nullptr;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree)
{
    if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash){ return this->InsertBlockIndex(hash); }))
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    for (const std::pair<uint256, CBlockIndex*>& item : mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(pindex);
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    return true;
}
bool static LoadBlockIndexDB(const CChainParams& chainparams)
{
    if (!g_chainstate.LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
        return false;

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<uint256, CBlockIndex*>& item : mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    return true;
}
bool LoadChainTip(const CChainParams& chainparams)
{
    if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;

    if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) {
        // In case we just added the genesis block, connect it now, so
        // that we always have a chainActive.Tip() when we return.
        LogPrintf("%s: Connecting genesis block...\n", __func__);
        CValidationState state;
        if (!ActivateBestChain(state, chainparams)) {
            return false;
        }
    }

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return false;
    chainActive.SetTip(it->second);

    g_chainstate.PruneBlockIndexCandidates();

    LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
    return true;
}
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0, false);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100, false);
}
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = nullptr;
    int nGoodTransactions = 0;
    CValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]...");
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone);
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            if (!pindex->GetUndoPos().IsNull()) {
                if (!UndoReadFromDisk(undo, pindex)) {
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                }
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            assert(coins.GetBestBlock() == pindex->GetBlockHash());
            DisconnectResult res = g_chainstate.DisconnectBlock(block, pindex, coins);
            if (res == DISCONNECT_FAILED) {
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
            pindexState = pindex->pprev;
            if (res == DISCONNECT_UNCLEAN) {
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else {
                nGoodTransactions += block.vtx.size();
            }
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))), false);
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
{
    // TODO: merge with ConnectBlock
    CBlock block;
    if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
        return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
    }

    for (const CTransactionRef& tx : block.vtx) {
        if (!tx->IsCoinBase()) {
            for (const CTxIn &txin : tx->vin) {
                inputs.SpendCoin(txin.prevout);
            }
        }
        // Pass check = true as every addition may be an overwrite.
        AddCoins(inputs, *tx, pindex->nHeight, true);
    }
    return true;
}
bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
{
    LOCK(cs_main);

    CCoinsViewCache cache(view);

    std::vector<uint256> hashHeads = view->GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");

    uiInterface.ShowProgress(_("Replaying blocks..."), 0, false);
    LogPrintf("Replaying blocks\n");

    const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.

    if (mapBlockIndex.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = mapBlockIndex[hashHeads[0]];

    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (mapBlockIndex.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = mapBlockIndex[hashHeads[1]];
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }

    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }

    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
        LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
        if (!RollforwardBlock(pindex, cache, params)) return false;
    }

    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
    return g_chainstate.ReplayBlocks(params, view);
}
bool CChainState::RewindBlockIndex(const CChainParams& params)
{
    LOCK(cs_main);

    // Note that during -reindex-chainstate we are called with an empty chainActive!

    int nHeight = 1;
    while (nHeight <= chainActive.Height()) {
        if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
            break;
        }
        nHeight++;
    }

    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
    CValidationState state;
    CBlockIndex* pindex = chainActive.Tip();
    while (chainActive.Height() >= nHeight) {
        if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, don't try rewinding past the HAVE_DATA point;
            // since older blocks can't be served anyway, there's
            // no need to walk further, and trying to DisconnectTip()
            // will fail (and require a needless reindex/redownload
            // of the blockchain).
            break;
        }
        if (!DisconnectTip(state, params, nullptr)) {
            return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
        }
        // Occasionally flush state to disk.
        if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC))
            return false;
    }

    // Reduce validity flag and have-data flags.
    // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
    // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
    for (const auto& entry : mapBlockIndex) {
        CBlockIndex* pindexIter = entry.second;

        // Note: If we encounter an insufficiently validated block that
        // is on chainActive, it must be because we are a pruning node, and
        // this block or some successor doesn't HAVE_DATA, so we were unable to
        // rewind all the way. Blocks remaining on chainActive at this point
        // must not have their validity reduced.
        if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
            // Reduce validity
            pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
            // Remove have-data flags.
            pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
            // Remove storage location.
            pindexIter->nFile = 0;
            pindexIter->nDataPos = 0;
            pindexIter->nUndoPos = 0;
            // Remove various other things
            pindexIter->nTx = 0;
            pindexIter->nChainTx = 0;
            pindexIter->nSequenceId = 0;
            // Make sure it gets written.
            setDirtyBlockIndex.insert(pindexIter);
            // Update indexes
            setBlockIndexCandidates.erase(pindexIter);
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
            while (ret.first != ret.second) {
                if (ret.first->second == pindexIter) {
                    mapBlocksUnlinked.erase(ret.first++);
                } else {
                    ++ret.first;
                }
            }
        } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
            setBlockIndexCandidates.insert(pindexIter);
        }
    }

    if (chainActive.Tip() != nullptr) {
        // We can't prune block index candidates based on our tip if we have
        // no tip due to chainActive being empty!
        PruneBlockIndexCandidates();

        CheckBlockIndex(params.GetConsensus());
    }

    return true;
}
bool RewindBlockIndex(const CChainParams& params) {
    if (!g_chainstate.RewindBlockIndex(params)) {
        return false;
    }

    if (chainActive.Tip() != nullptr) {
        // FlushStateToDisk can possibly read chainActive. Be conservative
        // and skip it here, we're about to -reindex-chainstate anyway, so
        // it'll get called a bunch real soon.
        CValidationState state;
        if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) {
            return false;
        }
    }

    return true;
}
void CChainState::UnloadBlockIndex() {
    nBlockSequenceId = 1;
    g_failed_blocks.clear();
    setBlockIndexCandidates.clear();
}
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
void UnloadBlockIndex()
{
    LOCK(cs_main);
    chainActive.SetTip(nullptr);
    pindexBestInvalid = nullptr;
    pindexBestHeader = nullptr;
    mempool.clear();
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }

    for (BlockMap::value_type& entry : mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;

    g_chainstate.UnloadBlockIndex();
}
bool LoadBlockIndex(const CChainParams& chainparams)
{
    // Load block index from databases
    bool needs_init = fReindex;
    if (!fReindex) {
        bool ret = LoadBlockIndexDB(chainparams);
        if (!ret) return false;
        needs_init = mapBlockIndex.empty();
    }

    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.

        LogPrintf("Initializing databases...\n");
        // Use the provided setting for -txindex in the new database
        fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX);
        pblocktree->WriteFlag("txindex", fTxIndex);
    }
    return true;
}
bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
{
    LOCK(cs_main);

    // Check whether we're already initialized by checking for genesis in
    // mapBlockIndex. Note that we can't use chainActive here, since it is
    // set based on the coins db, not the block index db, which is the only
    // thing loaded at this point.
    if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
        return true;

    try {
        CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
        CDiskBlockPos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
        if (blockPos.IsNull())
            return error("%s: writing genesis block to disk failed", __func__);
        CBlockIndex *pindex = AddToBlockIndex(block);
        CValidationState state;
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
            return error("%s: genesis block not accepted", __func__);
    } catch (const std::runtime_error& e) {
        return error("%s: failed to write genesis block: %s", __func__, e.what());
    }

    return true;
}
bool LoadGenesisBlock(const CChainParams& chainparams)
{
    return g_chainstate.LoadGenesisBlock(chainparams);
}
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                            block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    LOCK(cs_main);
                    CValidationState state;
                    if (g_chainstate.AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    CValidationState state;
                    if (!ActivateBestChain(state, chainparams)) {
                        break;
                    }
                }

                NotifyHeaderTip();

                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            CValidationState dummy;
                            if (g_chainstate.AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
                            {
                                nLoaded++;
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip();
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}

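/**
 * Exhaustive sanity check of the in-memory block tree (enabled with
 * -checkblockindex). The tree is walked depth-first using a forward map
 * from parent to children; along the way the pindexFirst* pointers track
 * the oldest ancestor on the current path that lacks a given property, so
 * each node can be checked against invariants such as "TREE valid implies
 * all parents are TREE valid" without re-walking its ancestry.
 */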
void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (auto& entry : mapBlockIndex) {
        forward.insert(std::make_pair(entry.second->pprev, entry.second));
    }

    assert(forward.size() == mapBlockIndex.size());

    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != nullptr) {
        nNodes++;
        if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == nullptr) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == nullptr) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) {
            if (pindexFirstInvalid == nullptr) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            //    tip.
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
            if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}

std::string CBlockFileInfo::ToString() const
{
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}

CBlockFileInfo* GetBlockFileInfo(size_t n)
{
    LOCK(cs_LastBlockFile);

    return &vinfoBlockFile.at(n);
}

ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}

BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsStatistics(chainActive.Tip(), params, pos);
}

int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
    LOCK(cs_main);
    return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
}

static const uint64_t MEMPOOL_DUMP_VERSION = 1;

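// The mempool.dat layout written by DumpMempool below is, roughly:
//   uint64_t version (MEMPOOL_DUMP_VERSION)
//   uint64_t count, then count entries of {transaction, int64_t nTime, int64_t nFeeDelta}
//   a map of txid -> fee delta for prioritised transactions not in the pool
// LoadMempool reads the same layout back and re-submits each transaction
// through AcceptToMemoryPoolWithTime, skipping entries older than -mempoolexpiry.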
bool LoadMempool(void)
{
    const CChainParams& chainparams = Params();
    int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                mempool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            CValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
                                           nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */);
                if (state.IsValid()) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (mempool.exists(tx->GetHash())) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            if (ShutdownRequested())
                return false;
        }
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            mempool.PrioritiseTransaction(i.first, i.second);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n", count, failed, expired, already_there);
    return true;
}

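/**
 * Write the current mempool contents to mempool.dat. A snapshot of the pool
 * and its fee deltas is taken under the mempool lock first, then serialized
 * to mempool.dat.new and renamed over the old file, so a crash mid-dump does
 * not leave a truncated mempool.dat behind.
 */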
bool DumpMempool(void)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;

    {
        LOCK(mempool.cs);
        for (const auto &i : mempool.mapDeltas) {
            mapDeltas[i.first] = i.second;
        }
        vinfo = mempool.infoAll();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
        if (!filestr) {
            return false;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << (int64_t)i.nTime;
            file << (int64_t)i.nFeeDelta;
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;
        FileCommit(file.Get());
        file.fclose();
        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
        return false;
    }
    return true;
}

//! Guess how far we are in the verification process at the given block index
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
    if (pindex == nullptr)
        return 0.0;

    int64_t nNow = time(nullptr);

    double fTxTotal;

    if (pindex->nChainTx <= data.nTxCount) {
        fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
    } else {
        fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
    }

    return pindex->nChainTx / fTxTotal;
}

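// Example of the GuessVerificationProgress estimate (hypothetical numbers):
// with chain params of nTxCount = 250M transactions as of nTime, dTxRate = 3 tx/s,
// and a tip whose nChainTx = 200M one day (86400 s) after nTime, the result is
// 200M / (250M + 86400 * 3) ~= 0.80, i.e. roughly 80% verified.
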
class CMainCleanup
{
public:
    CMainCleanup() {}
    ~CMainCleanup() {
        // block headers
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();
    }
} instance_of_cmaincleanup;