diff --git a/src/block_proof.cpp b/src/block_proof.cpp index 1478e2246e6..0c1a279d688 100644 --- a/src/block_proof.cpp +++ b/src/block_proof.cpp @@ -13,7 +13,7 @@ bool CheckChallenge(const CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params) { if (g_signed_blocks) { - return block.proof.challenge == indexLast.proof.challenge; + return block.proof.challenge == indexLast.get_proof().challenge; } else { return block.nBits == GetNextWorkRequired(&indexLast, &block, params); } diff --git a/src/chain.h b/src/chain.h index 80d56325f78..74cc29fb80d 100644 --- a/src/chain.h +++ b/src/chain.h @@ -192,10 +192,51 @@ class CBlockIndex uint32_t nTime{0}; uint32_t nBits{0}; uint32_t nNonce{0}; - CProof proof{}; + +protected: + std::optional proof{}; // Dynamic federation fields - DynaFedParams dynafed_params{}; - CScriptWitness m_signblock_witness{}; + std::optional m_dynafed_params{}; + std::optional m_signblock_witness{}; + + bool m_trimmed{false}; + + friend class CBlockTreeDB; + +public: + + // Irrevocably remove blocksigning and dynafed-related stuff from this + // in-memory copy of the block header. + void trim() { + assert_untrimmed(); + m_trimmed = true; + proof = std::nullopt; + m_dynafed_params = std::nullopt; + m_signblock_witness = std::nullopt; + } + + bool trimmed() const { + return m_trimmed; + } + + void assert_untrimmed() const { + assert(!m_trimmed); + } + + const CProof& get_proof() const { + assert_untrimmed(); + return proof.value(); + } + + const DynaFedParams& dynafed_params() const { + assert_untrimmed(); + return m_dynafed_params.value(); + } + + const CScriptWitness& signblock_witness() const { + assert_untrimmed(); + return m_signblock_witness.value(); + } //! (memory only) Sequential id assigned to distinguish order in which blocks are received. 
int32_t nSequenceId{0}; @@ -214,7 +255,7 @@ class CBlockIndex nBits{block.nBits}, nNonce{block.nNonce}, proof{block.proof}, - dynafed_params{block.m_dynafed_params}, + m_dynafed_params{block.m_dynafed_params}, m_signblock_witness{block.m_signblock_witness} { } @@ -239,6 +280,7 @@ class CBlockIndex CBlockHeader GetBlockHeader() const { + assert_untrimmed(); CBlockHeader block; block.nVersion = nVersion; if (pprev) @@ -250,9 +292,9 @@ class CBlockIndex } block.nBits = nBits; block.nNonce = nNonce; - block.proof = proof; - block.m_dynafed_params = dynafed_params; - block.m_signblock_witness = m_signblock_witness; + block.proof = proof.value(); + block.m_dynafed_params = m_dynafed_params.value(); + block.m_signblock_witness = m_signblock_witness.value(); return block; } @@ -366,12 +408,12 @@ class CDiskBlockIndex : public CBlockIndex nVersion = ~CBlockHeader::DYNAFED_HF_MASK & nVersion; return is_dyna; } else { - return !dynafed_params.IsNull(); + return !dynafed_params().IsNull(); } } bool RemoveDynaFedMaskOnSerialize(bool for_read) const { assert(!for_read); - return !dynafed_params.IsNull(); + return !dynafed_params().IsNull(); } SERIALIZE_METHODS(CDiskBlockIndex, obj) @@ -394,7 +436,7 @@ class CDiskBlockIndex : public CBlockIndex READWRITE(obj.nVersion); } else { int32_t nVersion = obj.nVersion; - if (!obj.dynafed_params.IsNull()) { + if (!obj.dynafed_params().IsNull()) { nVersion |= CBlockHeader::DYNAFED_HF_MASK; } READWRITE(nVersion); @@ -404,13 +446,19 @@ class CDiskBlockIndex : public CBlockIndex READWRITE(obj.hashPrev); READWRITE(obj.hashMerkleRoot); READWRITE(obj.nTime); + + // Allocate objects in the optional<> fields when reading, since READWRITE will not do this + SER_READ(obj, obj.m_dynafed_params = DynaFedParams()); + SER_READ(obj, obj.m_signblock_witness = CScriptWitness()); + SER_READ(obj, obj.proof = CProof()); + // For compatibility with elements 0.14 based chains if (g_signed_blocks) { if (is_dyna) { - READWRITE(obj.dynafed_params); - 
READWRITE(obj.m_signblock_witness.stack); + READWRITE(obj.m_dynafed_params.value()); + READWRITE(obj.m_signblock_witness.value().stack); } else { - READWRITE(obj.proof); + READWRITE(obj.proof.value()); } } else { READWRITE(obj.nBits); @@ -420,6 +468,7 @@ class CDiskBlockIndex : public CBlockIndex uint256 GetBlockHash() const { + assert_untrimmed(); CBlockHeader block; block.nVersion = nVersion; block.hashPrevBlock = hashPrev; @@ -430,8 +479,8 @@ class CDiskBlockIndex : public CBlockIndex } block.nBits = nBits; block.nNonce = nNonce; - block.proof = proof; - block.m_dynafed_params = dynafed_params; + block.proof = proof.value(); + block.m_dynafed_params = m_dynafed_params.value(); return block.GetHash(); } diff --git a/src/dynafed.cpp b/src/dynafed.cpp index be123c5d433..5ef34edcbd2 100644 --- a/src/dynafed.cpp +++ b/src/dynafed.cpp @@ -15,7 +15,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) { const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height); assert(p_epoch_walk); - const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params.m_proposed; + const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed; const uint256 proposal_root = proposal.CalculateRoot(); vote_tally[proposal_root]++; // Short-circuit once 4/5 threshold is reached @@ -56,13 +56,13 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde // may be pre-dynafed params const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height); assert(p_epoch_start); - if (p_epoch_start->dynafed_params.IsNull()) { + if (p_epoch_start->dynafed_params().IsNull()) { // We need to construct the "full" current parameters of pre-dynafed // consensus // Convert signblockscript to P2WSH uint256 signblock_witness_program; - CSHA256().Write(p_epoch_start->proof.challenge.data(), 
p_epoch_start->proof.challenge.size()).Finalize(signblock_witness_program.begin()); + CSHA256().Write(p_epoch_start->get_proof().challenge.data(), p_epoch_start->get_proof().challenge.size()).Finalize(signblock_witness_program.begin()); CScript p2wsh_signblock_script = CScript() << OP_0 << ToByteVector(signblock_witness_program); // Make P2SH-P2WSH-ness of non-dynafed fedpegscript explicit @@ -75,7 +75,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde // Put them in winning proposal winning_proposal = DynaFedParamEntry(p2wsh_signblock_script, consensus.max_block_signature_size, sh_wsh_fedpeg_program, consensus.fedpegScript, consensus.first_extension_space); } else { - winning_proposal = p_epoch_start->dynafed_params.m_current; + winning_proposal = p_epoch_start->dynafed_params().m_current; } return winning_proposal; } @@ -93,7 +93,7 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre // Return appropriate format based on epoch age or if we *just* activated // dynafed via BIP9 - if (epoch_age == 0 || pindexPrev->dynafed_params.IsNull()) { + if (epoch_age == 0 || pindexPrev->dynafed_params().IsNull()) { return entry; } else { return DynaFedParamEntry(entry.m_signblockscript, entry.m_signblock_witness_limit, entry.CalculateExtraRoot()); diff --git a/src/init.cpp b/src/init.cpp index 468d65147dc..6be2511ae9e 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -425,6 +425,7 @@ void SetupServerArgs(ArgsManager& argsman) hidden_args.emplace_back("-sysperms"); #endif argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-trim_headers", strprintf("Trim old headers in memory (by default older than 2 epochs), removing blocksigning and dynafed-related fields. 
Saves memory, but blocks us from serving blocks or headers to peers, and removes trimmed fields from some JSON RPC outputs. (default: false)"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-blockfilterindex=", strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) + " If is not supplied or if = 1, indexes for all known types are enabled.", @@ -979,6 +980,26 @@ bool AppInitParameterInteraction(const ArgsManager& args) fPruneMode = true; } + uint32_t epoch_length = chainparams.GetConsensus().dynamic_epoch_length; + if (epoch_length == std::numeric_limits::max()) { + // That's the default value, for non-dynafed chains and some tests. Pick a more sensible default here. + epoch_length = 20160; + } + + if (args.IsArgSet("-trim_headers")) { + LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but we will be unable to serve as a full P2P peer, and certain header fields may be missing from JSON RPC output.\n"); + fTrimHeaders = true; + // This calculation is driven by GetValidFedpegScripts in pegins.cpp, which walks the chain + // back to current epoch start, and then an additional total_valid_epochs on top of that. + // We add one epoch here for the current partial epoch, and then another one for good luck. + + nMustKeepFullHeaders = (chainparams.GetConsensus().total_valid_epochs + 2) * epoch_length; + // This is the number of headers we can have in flight downloading at a time, beyond the + // set of blocks we've already validated. Capping this is necessary to keep memory usage + // bounded during IBD. 
+ } + nHeaderDownloadBuffer = epoch_length * 2; + nConnectTimeout = args.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); if (nConnectTimeout <= 0) { nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; @@ -1690,7 +1711,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // if pruning, unset the service bit and perform the initial blockstore prune // after any wallet rescanning has taken place. - if (fPruneMode) { + if (fPruneMode || fTrimHeaders) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { @@ -1702,6 +1723,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } } + if (fTrimHeaders) { + LogPrintf("Unsetting NODE_NETWORK_LIMITED on header trim mode\n"); + nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK_LIMITED); + } + if (DeploymentEnabled(chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) { // Advertise witness capabilities. // The option to not set NODE_WITNESS is only used in the tests and should be removed. diff --git a/src/miner.cpp b/src/miner.cpp index 75a6bbd9be5..9bee5755de1 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -29,7 +29,7 @@ void ResetChallenge(CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params) { - block.proof.challenge = indexLast.proof.challenge; + block.proof.challenge = indexLast.get_proof().challenge; } void ResetProof(CBlockHeader& block) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index a53dabe4297..b3eb795d651 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -48,7 +48,7 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min; /** Headers download timeout. 
* Timeout = base + per_header * (expected number of headers) */ static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; -static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; +static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 2ms; /** Protect at least this many outbound peers from disconnection due to slow/ * behind headers chain. */ @@ -910,6 +910,10 @@ bool PeerManagerImpl::TipMayBeStale() bool PeerManagerImpl::CanDirectFetch() { + if(!m_chainman.ActiveChain().Tip()) { + LogPrint(BCLog::NET, "Tried to call CanDirectFetch with no currently-active chain.\n"); + return false; + } return m_chainman.ActiveChain().Tip()->GetBlockTime() > GetAdjustedTime() - m_chainparams.GetConsensus().nPowTargetSpacing * 20; } @@ -2007,6 +2011,36 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, return; } + // If we are already too far ahead of where we want to be on headers, discard + // the received headers. We can still get ahead by up to a single maximum-sized + // headers message here, but never further, so that's fine. + if (pindexBestHeader) { + uint64_t headers_ahead = pindexBestHeader->nHeight - m_chainman.ActiveHeight(); + bool too_far_ahead = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer); + if (too_far_ahead) { + LOCK(cs_main); + CNodeState *nodestate = State(pfrom.GetId()); + if ((nodestate->pindexBestKnownBlock == nullptr) || + (nodestate->pindexBestKnownBlock->nHeight < m_chainman.ActiveHeight())) { + // Our notion of what blocks a peer has available is based on its pindexBestKnownBlock, + // which is based on headers received from it. If we don't have one, or it's too old, + // then we can never get blocks from this peer until we accept headers from it first. + LogPrint(BCLog::NET, "NOT discarding headers from peer=%d, to update its block availability. 
(current best header %d, active chain height %d)\n", pfrom.GetId(), pindexBestHeader->nHeight, m_chainman.ActiveHeight()); + } else { + LogPrint(BCLog::NET, "Discarding received headers and pausing header sync from peer=%d, because we are too far ahead of block sync. (%d > %d)\n", pfrom.GetId(), pindexBestHeader->nHeight, m_chainman.ActiveHeight()); + if (nodestate->fSyncStarted) { + // Cancel sync from this node, so we don't penalize it later. + // This will cause us to automatically start syncing from a different node (or restart syncing from the same node) later, + // if we still need to sync headers. + nSyncStarted--; + nodestate->fSyncStarted = false; + nodestate->m_headers_sync_timeout = 0us; + } + return; + } + } + } + bool received_new_header = false; const CBlockIndex *pindexLast = nullptr; { @@ -2084,14 +2118,29 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, nodestate->m_last_block_announcement = GetTime(); } - if (nCount == MAX_HEADERS_RESULTS && !all_duplicate) { - // Headers message had its maximum size; the peer may have more headers. - // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue - // from there instead. - // HOWEVER, if all headers we got this time were duplicates that we already had, don't ask for any more. - LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", - pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256())); + // If a peer gives us as many headers as possible, this is implicitly a signal that the + // peer has more headers to send us. In Bitcoin Core, the node always asks for more + // headers at this point. 
Our logic is slightly more complex, to work around an apparent + // bug in the Bitcoin Core state machine, where we can end up downloading headers from + /// lots of peers at the same time by accident. + if (nCount == MAX_HEADERS_RESULTS) { + if (all_duplicate && !nodestate->fSyncStarted) { + // In this case two things are true: + // 1) This node's most recent batch of headers only included ones we already had. + // 2) We don't have this node marked as a peer to header-sync from. + // This happens when some exogenous event, like an INV of a new block, causes us + // to ask a peer for an unbounded number of headers, when we're already in the + // process of downloading the headers from a different peer. + // In this case the right thing to do is simply stop syncing headers from this + // peer; it's redundant. Here we do nothing; since we don't ask the peer for + // more headers, it will stop sending them. + } else { + // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue + // from there instead. + LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", + pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256())); + } } // If this set of headers is valid and ends in a block with at least as @@ -3133,10 +3182,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) { + if (pindex->trimmed()) { + // For simplicity, if any of the headers they're asking for are trimmed, + // just drop the request. 
+ LogPrint(BCLog::NET, "%s: ignoring getheaders from peer=%i which would return at least one trimmed header\n", __func__, pfrom.GetId()); + return; + } vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) break; } + // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty // headers message). In both cases it's safe to update @@ -4476,7 +4532,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (pindexBestHeader == nullptr) pindexBestHeader = m_chainman.ActiveChain().Tip(); bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do. - if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { + uint64_t headers_ahead = pindexBestHeader->nHeight - m_chainman.ActiveHeight(); + // ELEMENTS: Only download if our headers aren't "too far ahead" of our blocks. + bool got_enough_headers = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer); + if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex && !got_enough_headers) { // Only actively request headers from a single peer, unless we're close to today. 
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index da817aea3b0..5a2744d5537 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -25,6 +25,9 @@ std::atomic_bool fReindex(false); bool fHavePruned = false; bool fPruneMode = false; uint64_t nPruneTarget = 0; +bool fTrimHeaders = false; +uint64_t nMustKeepFullHeaders = std::numeric_limits::max(); +uint64_t nHeaderDownloadBuffer = std::numeric_limits::max(); // TODO make namespace { RecursiveMutex cs_LastBlockFile; @@ -409,6 +412,17 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus return true; } +bool ReadBlockHeaderFromDisk(CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams) +{ + // Not very efficient: read a block and throw away all but the header. + CBlock tmp; + if (!ReadBlockFromDisk(tmp, pindex, consensusParams)) { + return false; + } + header = tmp.GetBlockHeader(); + return true; +} + bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start) { FlatFilePos hpos = pos; diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 7c7bf681788..df582b06d1b 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -44,6 +44,13 @@ extern bool fHavePruned; extern bool fPruneMode; /** Number of MiB of block files that we're trying to stay below. */ extern uint64_t nPruneTarget; +/** True if we're running in -trim_headers mode. */ +extern bool fTrimHeaders; +/** Minimum number of full untrimmed headers to keep, for blocks we have. */ +extern uint64_t nMustKeepFullHeaders; +/** Target number of headers to download beyond the blocks we have. */ +// NOTE: this currently only operates when in header trim mode, but it's really independent of that. +extern uint64_t nHeaderDownloadBuffer; //! 
Check whether the block associated with this index entry is pruned or not. bool IsBlockPruned(const CBlockIndex* pblockindex); @@ -71,6 +78,8 @@ bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::P bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams); bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start); bool ReadRawBlockFromDisk(std::vector& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start); +// ELEMENTS: +bool ReadBlockHeaderFromDisk(class CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams); bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex); bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams); diff --git a/src/pegins.cpp b/src/pegins.cpp index e9a7b6c74eb..dc133d1fbce 100644 --- a/src/pegins.cpp +++ b/src/pegins.cpp @@ -487,8 +487,8 @@ std::vector> GetValidFedpegScripts(const CBlockIndex break; } - if (!p_epoch_start->dynafed_params.IsNull()) { - fedpegscripts.push_back(std::make_pair(p_epoch_start->dynafed_params.m_current.m_fedpeg_program, p_epoch_start->dynafed_params.m_current.m_fedpegscript)); + if (!p_epoch_start->dynafed_params().IsNull()) { + fedpegscripts.push_back(std::make_pair(p_epoch_start->dynafed_params().m_current.m_fedpeg_program, p_epoch_start->dynafed_params().m_current.m_fedpegscript)); } else { fedpegscripts.push_back(std::make_pair(GetScriptForDestination(ScriptHash(GetScriptForDestination(WitnessV0ScriptHash(params.fedpegScript)))), params.fedpegScript)); } diff --git a/src/rest.cpp b/src/rest.cpp index 7453e966db5..519412e2287 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -221,7 +221,13 @@ static bool rest_headers(const std::any& context, case RetFormat::BINARY: { CDataStream ssHeader(SER_NETWORK, 
PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { - ssHeader << pindex->GetBlockHeader(); + if (pindex->trimmed()) { + CBlockHeader tmp; + ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus()); + ssHeader << tmp; + } else { + ssHeader << pindex->GetBlockHeader(); + } } std::string binaryHeader = ssHeader.str(); @@ -233,7 +239,14 @@ static bool rest_headers(const std::any& context, case RetFormat::HEX: { CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { - ssHeader << pindex->GetBlockHeader(); + if (pindex->trimmed()) { + CBlockHeader tmp; + ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus()); + ssHeader << tmp; + + } else { + ssHeader << pindex->GetBlockHeader(); + } } std::string strHex = HexStr(ssHeader) + "\n"; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index b5c85aa77ba..32977c89997 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -248,13 +248,26 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex->nChainWork.GetHex()); } else { - if (blockindex->dynafed_params.IsNull()) { - result.pushKV("signblock_witness_asm", ScriptToAsmStr(blockindex->proof.solution)); - result.pushKV("signblock_witness_hex", HexStr(blockindex->proof.solution)); - result.pushKV("signblock_challenge", HexStr(blockindex->proof.challenge)); + if (blockindex->dynafed_params().IsNull()) { + if (blockindex->trimmed()) { + result.pushKV("signblock_witness_asm", ""); + result.pushKV("signblock_witness_hex", ""); + result.pushKV("signblock_challenge", ""); + result.pushKV("warning", "Fields missing due to -trim_headers flag."); + } else { + result.pushKV("signblock_witness_asm", ScriptToAsmStr(blockindex->get_proof().solution)); + result.pushKV("signblock_witness_hex", HexStr(blockindex->get_proof().solution)); + result.pushKV("signblock_challenge", 
HexStr(blockindex->get_proof().challenge)); + } } else { - result.pushKV("signblock_witness_hex", EncodeHexScriptWitness(blockindex->m_signblock_witness)); - result.pushKV("dynamic_parameters", dynaParamsToJSON(blockindex->dynafed_params)); + if (blockindex->trimmed()) { + result.pushKV("signblock_witness_hex", ""); + result.pushKV("dynamic_parameters", ""); + result.pushKV("warning", "Fields missing due to -trim_headers flag."); + } else { + result.pushKV("signblock_witness_hex", EncodeHexScriptWitness(blockindex->signblock_witness())); + result.pushKV("dynamic_parameters", dynaParamsToJSON(blockindex->dynafed_params())); + } } } result.pushKV("nTx", (uint64_t)blockindex->nTx); @@ -267,7 +280,13 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, bool txDetails) { - UniValue result = blockheaderToJSON(tip, blockindex); + UniValue result; + if (blockindex->trimmed()) { + CBlockIndex tmp = CBlockIndex(block.GetBlockHeader()); + result = blockheaderToJSON(tip, &tmp); + } else { + result = blockheaderToJSON(tip, blockindex); + } result.pushKV("strippedsize", (int)::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS)); result.pushKV("size", (int)::GetSerializeSize(block, PROTOCOL_VERSION)); @@ -969,7 +988,13 @@ static RPCHelpMan getblockheader() if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); - ssBlock << pblockindex->GetBlockHeader(); + if (pblockindex->trimmed()) { + CBlockHeader tmp; + ReadBlockHeaderFromDisk(tmp, pblockindex, Params().GetConsensus()); + ssBlock << tmp; + } else { + ssBlock << pblockindex->GetBlockHeader(); + } std::string strHex = HexStr(ssBlock); return strHex; } diff --git a/src/txdb.cpp b/src/txdb.cpp index f616ab3288d..ae85840111b 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -307,12 +307,50 @@ bool CBlockTreeDB::WritePAKList(const std::vector >& return 
Write(std::make_pair(DB_PAK, uint256S("1")), offline_list) && Write(std::make_pair(DB_PAK, uint256S("2")), online_list) && Write(std::make_pair(DB_PAK, uint256S("3")), reject); } -bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex) +/** Note that we only get a conservative (lower) estimate of the max header height here, + * obtained by sampling the first 10,000 headers on disk (which are in random order) and + * taking the highest block we see. */ +bool CBlockTreeDB::WalkBlockIndexGutsForMaxHeight(int* nHeight) { + std::unique_ptr pcursor(NewIterator()); + *nHeight = 0; + int i = 0; + pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); + while (pcursor->Valid()) { + if (ShutdownRequested()) return false; + std::pair key; + if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) { + i++; + if (i > 10'000) { + // Under the (accurate) assumption that the headers on disk are effectively in random height order, + // we have a good-enough (conservative) estimate of the max height very quickly, and don't need to + // waste more time. Shortcutting like this will cause us to keep a few extra headers, which is fine. 
+ break; + } + CDiskBlockIndex diskindex; + if (pcursor->GetValue(diskindex)) { + if (diskindex.nHeight > *nHeight) { + *nHeight = diskindex.nHeight; + } + pcursor->Next(); + } else { + return error("%s: failed to read value", __func__); + } + } else { + break; + } + } + return true; +} + +bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex, int trimBelowHeight) { std::unique_ptr pcursor(NewIterator()); pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); + int n_untrimmed = 0; + int n_total = 0; + // Load m_block_index while (pcursor->Valid()) { if (ShutdownRequested()) return false; @@ -332,19 +370,27 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, pindexNew->nTime = diskindex.nTime; pindexNew->nBits = diskindex.nBits; pindexNew->nNonce = diskindex.nNonce; - pindexNew->proof = diskindex.proof; pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; - pindexNew->dynafed_params = diskindex.dynafed_params; - pindexNew->m_signblock_witness = diskindex.m_signblock_witness; - - const uint256 block_hash = pindexNew->GetBlockHash(); - // Only validate one of every 1000 block header for sanity check - if (pindexNew->nHeight % 1000 == 0 && - block_hash != consensusParams.hashGenesisBlock && - !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) { - return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString()); + + n_total++; + if (diskindex.nHeight >= trimBelowHeight) { + n_untrimmed++; + pindexNew->proof = diskindex.proof; + pindexNew->m_dynafed_params = diskindex.m_dynafed_params; + pindexNew->m_signblock_witness = diskindex.m_signblock_witness; + + const uint256 block_hash = pindexNew->GetBlockHash(); + // Only validate one of every 1000 block header for sanity check + if (pindexNew->nHeight % 1000 == 0 && + block_hash != consensusParams.hashGenesisBlock && + !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) { 
+ return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString()); + } + } else { + pindexNew->m_trimmed = true; } + pcursor->Next(); } else { return error("%s: failed to read value", __func__); @@ -354,6 +400,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, } } + LogPrintf("LoadBlockIndexGuts: loaded %d total / %d untrimmed (fully in-memory) headers\n", n_total, n_untrimmed); return true; } diff --git a/src/txdb.h b/src/txdb.h index 7756026c105..9d2461d4732 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -85,8 +85,9 @@ class CBlockTreeDB : public CDBWrapper void ReadReindexing(bool &fReindexing); bool WriteFlag(const std::string &name, bool fValue); bool ReadFlag(const std::string &name, bool &fValue); - bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex); + bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function insertBlockIndex, int trimBelowHeight); // ELEMENTS: + bool WalkBlockIndexGutsForMaxHeight(int* nHeight); bool ReadPAKList(std::vector >& offline_list, std::vector >& online_list, bool& reject); bool WritePAKList(const std::vector >& offline_list, const std::vector >& online_list, bool reject); }; diff --git a/src/validation.cpp b/src/validation.cpp index c96515f3d71..d11b3e0d2d2 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -76,7 +76,7 @@ static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000; /** Maximum kilobytes for transactions to store for processing during reorg */ static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000; /** Time to wait between writing blocks/block index to disk. */ -static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1}; +static constexpr std::chrono::minutes DATABASE_WRITE_INTERVAL{5}; /** Time to wait between flushing chainstate to disk. 
*/ static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24}; /** Maximum age of our tip for us to be considered current for fee estimation */ @@ -2346,6 +2346,7 @@ bool CChainState::FlushStateToDisk( } std::vector vBlocks; vBlocks.reserve(setDirtyBlockIndex.size()); + std::set setTrimmableBlockIndex(setDirtyBlockIndex); for (std::set::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { vBlocks.push_back(*it); setDirtyBlockIndex.erase(it++); @@ -2353,6 +2354,32 @@ bool CChainState::FlushStateToDisk( if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { return AbortNode(state, "Failed to write to block index database"); } + + if (fTrimHeaders) { + LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size()); + int trim_height = m_chain.Height() - nMustKeepFullHeaders; + int min_height = std::numeric_limits::max(); + CBlockIndex* min_index = nullptr; + for (std::set::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) { + (*it)->assert_untrimmed(); + if ((*it)->nHeight < trim_height) { + (*it)->trim(); + if ((*it)->nHeight < min_height) { + min_height = (*it)->nHeight; + min_index = *it; + } + } + } + + // Handle any remaining untrimmed blocks that were too recent for trimming last time we flushed. + if (min_index) { + min_index = min_index->pprev; + while (min_index && !min_index->trimmed()) { + min_index->trim(); + min_index = min_index->pprev; + } + } + } } // Finally remove any pruned files if (fFlushForPrune) { @@ -2464,13 +2491,13 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew) !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : ""); // Do some logging if dynafed parameters changed. 
- if (pindexNew->pprev && !pindexNew->dynafed_params.IsNull()) { + if (pindexNew->pprev && !pindexNew->dynafed_params().IsNull()) { int height = pindexNew->nHeight; uint256 hash = pindexNew->GetBlockHash(); - uint256 root = pindexNew->dynafed_params.m_current.CalculateRoot(); - if (pindexNew->pprev->dynafed_params.IsNull()) { + uint256 root = pindexNew->dynafed_params().m_current.CalculateRoot(); + if (pindexNew->pprev->dynafed_params().IsNull()) { LogPrintf("Dynafed activated in block %d:%s: %s\n", height, hash.GetHex(), root.GetHex()); - } else if (root != pindexNew->pprev->dynafed_params.m_current.CalculateRoot()) { + } else if (root != pindexNew->pprev->dynafed_params().m_current.CalculateRoot()) { LogPrintf("New dynafed parameters activated in block %d:%s: %s\n", height, hash.GetHex(), root.GetHex()); } } @@ -4110,7 +4137,19 @@ bool BlockManager::LoadBlockIndex( CBlockTreeDB& blocktree, std::set& block_index_candidates) { - if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) + int trim_below_height = 0; + if (fTrimHeaders) { + int max_height = 0; + if (!blocktree.WalkBlockIndexGutsForMaxHeight(&max_height)) { + LogPrintf("LoadBlockIndex: Failed to WalkBlockIndexGutsForMaxHeight.\n"); + return false; + } + + int must_keep_headers = (consensus_params.total_valid_epochs + 2) * consensus_params.dynamic_epoch_length; + int extra_headers_buffer = consensus_params.dynamic_epoch_length * 2; // XXX arbitrary + trim_below_height = max_height - must_keep_headers - extra_headers_buffer; + } + if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, trim_below_height)) return false; // Calculate nChainWork diff --git a/src/validation.h b/src/validation.h index 7f04f1a7409..f4ebc713d9d 100644 --- a/src/validation.h +++ b/src/validation.h @@ -649,6 +649,7 @@ class 
CChainState //! @returns A reference to the in-memory cache of the UTXO set. CCoinsViewCache& CoinsTip() EXCLUSIVE_LOCKS_REQUIRED(cs_main) { + assert(m_coins_views); assert(m_coins_views->m_cacheview); return *m_coins_views->m_cacheview.get(); } diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh index 737d35a397e..c4ab41d4e26 100755 --- a/test/lint/lint-locale-dependence.sh +++ b/test/lint/lint-locale-dependence.sh @@ -40,9 +40,11 @@ export LC_ALL=C KNOWN_VIOLATIONS=( "src/bitcoin-tx.cpp.*stoul" "src/bitcoin-tx.cpp.*trim_right" + "src/chain.h.*trim" "src/dbwrapper.cpp.*stoul" "src/dbwrapper.cpp:.*vsnprintf" "src/httprpc.cpp.*trim" + "src/init.cpp.*trim" "src/node/blockstorage.cpp:.*atoi" "src/qt/rpcconsole.cpp:.*atoi" "src/rest.cpp:.*strtol"