// src/blockencodings.cpp (bitcoinplatinum.git)
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "blockencodings.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "chainparams.h"
#include "hash.h"
#include "random.h"
#include "streams.h"
#include "txmempool.h"
#include "validation.h"
#include "util.h"

#include <unordered_map>
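
// MIN_TRANSACTION_BASE_SIZE is the serialized size of an empty, witness-stripped
// transaction, i.e. a lower bound on the size of any transaction that can appear in
// a block. InitData() below uses it to cap how many transactions a compact block may
// claim to contain.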
#define MIN_TRANSACTION_BASE_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS))

CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID) :
        nonce(GetRand(std::numeric_limits<uint64_t>::max())),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
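    // Only the coinbase is prefilled (index 0): peers can never have it in their
    // mempool, so sending it outright avoids a guaranteed round trip. Every other
    // transaction is referenced by its short ID in the loop below.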
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction& tx = *block.vtx[i];
        shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash());
    }
}

void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << header << nonce;
    CSHA256 hasher;
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
    uint256 shorttxidhash;
    hasher.Finalize(shorttxidhash.begin());
    shorttxidk0 = shorttxidhash.GetUint64(0);
    shorttxidk1 = shorttxidhash.GetUint64(1);
}
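
// Per BIP 152, the two SipHash keys are the first two little-endian 64-bit words of
// SHA256(block header || nonce), so both peers derive identical keys from data they
// already share; GetShortID() below uses them to compute each 6-byte short ID.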

uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const uint256& txhash) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    return SipHashUint256(shorttxidk0, shorttxidk1, txhash) & 0xffffffffffffL;
}
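
// Rough estimate: short IDs live in a 48-bit space, so with a few thousand candidate
// transactions (block plus mempool) the birthday bound puts the chance of any two
// colliding around n^2 / 2^49, on the order of 1e-8 for n = 3000. A collision is
// handled by falling back to a full-block request, as the TODO in InitData() notes.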

ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<uint256, CTransactionRef>>& extra_txn) {
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
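    // A block of at most MAX_BLOCK_BASE_SIZE bytes cannot contain more than
    // MAX_BLOCK_BASE_SIZE / MIN_TRANSACTION_BASE_SIZE transactions (roughly
    // 1,000,000 / 10 = 100,000), so a compact block claiming more is malformed.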
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_BASE_SIZE / MIN_TRANSACTION_BASE_SIZE)
        return READ_STATUS_INVALID;

    assert(header.IsNull() && txn_available.empty());
    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());

    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;
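
        // The prefilled index is differentially encoded: each entry's absolute
        // position is the previous absolute position plus the encoded index plus one,
        // so encoded indexes {0, 5} decode to absolute positions 0 and 6.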
        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; // index is a uint16_t, so this can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn nor a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of txids -> positions and check mempool to see what we have (or don't)
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
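    // The constructor argument asks for at least one bucket per expected short ID up
    // front, which should avoid rehashing while the map is filled below.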
    uint16_t index_offset = 0;
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
        // If we assume blocks of up to 16000 transactions, allowing 12 elements per bucket should
        // only fail once per ~1 million block transfers (per peer and connection).
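        // As a sanity check on that bound: with a full table the per-bucket count is
        // approximately Poisson(1), so P(bucket > 12) is about 6e-11 and S * 6e-11 is
        // about 1e-6 for S = 16000, i.e. roughly one spurious failure per million
        // transfers, matching the estimate above.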
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }
    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision

    std::vector<bool> have_txn(txn_available.size());
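    // have_txn marks slots already filled during this scan, so a second mempool match
    // for the same short ID is treated as an in-mempool collision: the slot is cleared
    // and the transaction gets requested instead of trusting either candidate.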
    {
    LOCK(pool->cs);
    const std::vector<std::pair<uint256, CTxMemPool::txiter> >& vTxHashes = pool->vTxHashes;
    for (size_t i = 0; i < vTxHashes.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(vTxHashes[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = vTxHashes[i].second->GetSharedTx();
                have_txn[idit->second] = true;
                mempool_count++;
            } else {
                // If we find two mempool txn that match the short id, just request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying.
                if (txn_available[idit->second]) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }
    }

    for (size_t i = 0; i < extra_txn.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i].second;
                have_txn[idit->second] = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying.
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first.
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));

    return READ_STATUS_OK;
}

bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
    assert(!header.IsNull());
    assert(index < txn_available.size());
    return txn_available[index] ? true : false;
}

ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) {
    assert(!header.IsNull());
    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());
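
    // vtx_missing is expected to contain exactly the transactions that were still
    // unavailable after InitData(), in the order they appear in the block; the checks
    // below reject a response with too few or too many entries.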
    size_t tx_missing_offset = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    CValidationState state;
    if (!CheckBlock(block, state, Params().GetConsensus())) {
        // TODO: We really want to just check merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.CorruptionPossible())
            return READ_STATUS_FAILED; // Possible Short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    }

    LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing)
            LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
    }

    return READ_STATUS_OK;
}