Skip to content

Improve elements memory usage #1190

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 16 commits into from
Jan 26, 2023
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/block_proof.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
bool CheckChallenge(const CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params)
{
if (g_signed_blocks) {
return block.proof.challenge == indexLast.proof.challenge;
return block.proof.challenge == indexLast.get_proof().challenge;
} else {
return block.nBits == GetNextWorkRequired(&indexLast, &block, params);
}
Expand Down
79 changes: 64 additions & 15 deletions src/chain.h
Original file line number Diff line number Diff line change
Expand Up @@ -192,10 +192,51 @@ class CBlockIndex
uint32_t nTime{0};
uint32_t nBits{0};
uint32_t nNonce{0};
CProof proof{};

protected:
std::optional<CProof> proof{};
// Dynamic federation fields
DynaFedParams dynafed_params{};
CScriptWitness m_signblock_witness{};
std::optional<DynaFedParams> m_dynafed_params{};
std::optional<CScriptWitness> m_signblock_witness{};

bool m_trimmed{false};

friend class CBlockTreeDB;

public:

// Irrevocably remove blocksigning and dynafed-related stuff from this
// in-memory copy of the block header.
// Irrevocably discard the blocksigning/dynafed data carried by this
// in-memory block index entry (proof, dynafed params, signblock
// witness) to reduce memory usage. Marks the entry as trimmed first,
// then releases each optional. May only be called once: a second call
// trips the assert in assert_untrimmed().
void trim() {
assert_untrimmed();
m_trimmed = true;
proof.reset();
m_dynafed_params.reset();
m_signblock_witness.reset();
}

// Whether trim() has been called on this entry, i.e. the blocksigning
// and dynafed-related fields have been discarded and must not be read.
bool trimmed() const {
return m_trimmed;
}

// Abort if this entry has been trimmed. Called as a guard before any
// access to the optional proof/dynafed/witness fields, since those are
// cleared by trim().
void assert_untrimmed() const {
assert(!m_trimmed);
}

// Accessor for the block-signing proof; asserts the entry has not been
// trimmed before dereferencing the optional.
// NOTE(review): `proof` is default-initialized to an empty optional, so
// on an index entry that was never populated, value() would throw even
// when untrimmed — presumably callers only use this on fully-loaded
// entries; verify.
const CProof& get_proof() const {
assert_untrimmed();
return proof.value();
}

// Accessor for the dynamic-federation parameters; asserts the entry has
// not been trimmed before dereferencing the optional.
// NOTE(review): same caveat as get_proof() — an entry whose optional was
// never populated would throw from value(); confirm callers only reach
// this on fully-loaded entries.
const DynaFedParams& dynafed_params() const {
assert_untrimmed();
return m_dynafed_params.value();
}

// Accessor for the blocksigning witness; asserts the entry has not been
// trimmed before dereferencing the optional.
const CScriptWitness& signblock_witness() const {
assert_untrimmed();
return m_signblock_witness.value();
}

//! (memory only) Sequential id assigned to distinguish order in which blocks are received.
int32_t nSequenceId{0};
Expand All @@ -214,7 +255,7 @@ class CBlockIndex
nBits{block.nBits},
nNonce{block.nNonce},
proof{block.proof},
dynafed_params{block.m_dynafed_params},
m_dynafed_params{block.m_dynafed_params},
m_signblock_witness{block.m_signblock_witness}
{
}
Expand All @@ -239,6 +280,7 @@ class CBlockIndex

CBlockHeader GetBlockHeader() const
{
assert_untrimmed();
CBlockHeader block;
block.nVersion = nVersion;
if (pprev)
Expand All @@ -250,9 +292,9 @@ class CBlockIndex
}
block.nBits = nBits;
block.nNonce = nNonce;
block.proof = proof;
block.m_dynafed_params = dynafed_params;
block.m_signblock_witness = m_signblock_witness;
block.proof = proof.value();
block.m_dynafed_params = m_dynafed_params.value();
block.m_signblock_witness = m_signblock_witness.value();
return block;
}

Expand Down Expand Up @@ -366,12 +408,12 @@ class CDiskBlockIndex : public CBlockIndex
nVersion = ~CBlockHeader::DYNAFED_HF_MASK & nVersion;
return is_dyna;
} else {
return !dynafed_params.IsNull();
return !dynafed_params().IsNull();
}
}
bool RemoveDynaFedMaskOnSerialize(bool for_read) const {
assert(!for_read);
return !dynafed_params.IsNull();
return !dynafed_params().IsNull();
}

SERIALIZE_METHODS(CDiskBlockIndex, obj)
Expand All @@ -394,7 +436,7 @@ class CDiskBlockIndex : public CBlockIndex
READWRITE(obj.nVersion);
} else {
int32_t nVersion = obj.nVersion;
if (!obj.dynafed_params.IsNull()) {
if (!obj.dynafed_params().IsNull()) {
nVersion |= CBlockHeader::DYNAFED_HF_MASK;
}
READWRITE(nVersion);
Expand All @@ -404,13 +446,19 @@ class CDiskBlockIndex : public CBlockIndex
READWRITE(obj.hashPrev);
READWRITE(obj.hashMerkleRoot);
READWRITE(obj.nTime);

// Allocate objects in the optional<> fields when reading, since READWRITE will not do this
SER_READ(obj, obj.m_dynafed_params = DynaFedParams());
SER_READ(obj, obj.m_signblock_witness = CScriptWitness());
SER_READ(obj, obj.proof = CProof());

// For compatibility with elements 0.14 based chains
if (g_signed_blocks) {
if (is_dyna) {
READWRITE(obj.dynafed_params);
READWRITE(obj.m_signblock_witness.stack);
READWRITE(obj.m_dynafed_params.value());
READWRITE(obj.m_signblock_witness.value().stack);
} else {
READWRITE(obj.proof);
READWRITE(obj.proof.value());
}
} else {
READWRITE(obj.nBits);
Expand All @@ -420,6 +468,7 @@ class CDiskBlockIndex : public CBlockIndex

uint256 GetBlockHash() const
{
assert_untrimmed();
CBlockHeader block;
block.nVersion = nVersion;
block.hashPrevBlock = hashPrev;
Expand All @@ -430,8 +479,8 @@ class CDiskBlockIndex : public CBlockIndex
}
block.nBits = nBits;
block.nNonce = nNonce;
block.proof = proof;
block.m_dynafed_params = dynafed_params;
block.proof = proof.value();
block.m_dynafed_params = m_dynafed_params.value();
return block.GetHash();
}

Expand Down
10 changes: 5 additions & 5 deletions src/dynafed.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens
for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) {
const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height);
assert(p_epoch_walk);
const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params.m_proposed;
const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed;
const uint256 proposal_root = proposal.CalculateRoot();
vote_tally[proposal_root]++;
// Short-circuit once 4/5 threshold is reached
Expand Down Expand Up @@ -56,13 +56,13 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde
// may be pre-dynafed params
const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height);
assert(p_epoch_start);
if (p_epoch_start->dynafed_params.IsNull()) {
if (p_epoch_start->dynafed_params().IsNull()) {
// We need to construct the "full" current parameters of pre-dynafed
// consensus

// Convert signblockscript to P2WSH
uint256 signblock_witness_program;
CSHA256().Write(p_epoch_start->proof.challenge.data(), p_epoch_start->proof.challenge.size()).Finalize(signblock_witness_program.begin());
CSHA256().Write(p_epoch_start->get_proof().challenge.data(), p_epoch_start->get_proof().challenge.size()).Finalize(signblock_witness_program.begin());
CScript p2wsh_signblock_script = CScript() << OP_0 << ToByteVector(signblock_witness_program);

// Make P2SH-P2WSH-ness of non-dynafed fedpegscript explicit
Expand All @@ -75,7 +75,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde
// Put them in winning proposal
winning_proposal = DynaFedParamEntry(p2wsh_signblock_script, consensus.max_block_signature_size, sh_wsh_fedpeg_program, consensus.fedpegScript, consensus.first_extension_space);
} else {
winning_proposal = p_epoch_start->dynafed_params.m_current;
winning_proposal = p_epoch_start->dynafed_params().m_current;
}
return winning_proposal;
}
Expand All @@ -93,7 +93,7 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre

// Return appropriate format based on epoch age or if we *just* activated
// dynafed via BIP9
if (epoch_age == 0 || pindexPrev->dynafed_params.IsNull()) {
if (epoch_age == 0 || pindexPrev->dynafed_params().IsNull()) {
return entry;
} else {
return DynaFedParamEntry(entry.m_signblockscript, entry.m_signblock_witness_limit, entry.CalculateExtraRoot());
Expand Down
25 changes: 24 additions & 1 deletion src/init.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,7 @@ void SetupServerArgs(ArgsManager& argsman)
hidden_args.emplace_back("-sysperms");
#endif
argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-trim_headers", strprintf("Trim old headers in memory, removing blocksigning and dynafed-related fields. Saves memory, but blocks us from serving blocks or headers to peers, and removes trimmed fields from some JSON RPC outputs. (default: false)"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-blockfilterindex=<type>",
strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
Expand Down Expand Up @@ -979,6 +980,23 @@ bool AppInitParameterInteraction(const ArgsManager& args)
fPruneMode = true;
}

if (args.IsArgSet("-trim_headers")) {
LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but we will be unable to serve as a full P2P peer, and certain header fields may be missing from JSON RPC output.\n");
fTrimHeaders = true;
// This calculation is driven by GetValidFedpegScripts in pegins.cpp, which walks the chain
// back to current epoch start, and then an additional total_valid_epochs on top of that.
// We add one epoch here for the current partial epoch, and then another one for good luck.
// NB: If we're non-dynafed, then:
// - total_valid_epochs = 1
// - dynamic_epoch_length = std::numeric_limits<uint32_t>::max()
// So this will work out to an unhelpfully-large number. XXX: Is this a problem?
nMustKeepFullHeaders = (chainparams.GetConsensus().total_valid_epochs + 2) * (chainparams.GetConsensus().dynamic_epoch_length);
// This is the number of headers we can have in flight downloading at a time, beyond the
// set of blocks we've already validated. Capping this is necessary to keep memory usage
// bounded during IBD.
}
nHeaderDownloadBuffer = chainparams.GetConsensus().dynamic_epoch_length * 2;

nConnectTimeout = args.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
if (nConnectTimeout <= 0) {
nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
Expand Down Expand Up @@ -1690,7 +1708,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)

// if pruning, unset the service bit and perform the initial blockstore prune
// after any wallet rescanning has taken place.
if (fPruneMode) {
if (fPruneMode || fTrimHeaders) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
Expand All @@ -1702,6 +1720,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}

if (fTrimHeaders) {
LogPrintf("Unsetting NODE_NETWORK_LIMITED on header trim mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK_LIMITED);
}

if (DeploymentEnabled(chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
// Advertise witness capabilities.
// The option to not set NODE_WITNESS is only used in the tests and should be removed.
Expand Down
2 changes: 1 addition & 1 deletion src/miner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@

void ResetChallenge(CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params)
{
block.proof.challenge = indexLast.proof.challenge;
block.proof.challenge = indexLast.get_proof().challenge;
}

void ResetProof(CBlockHeader& block)
Expand Down
63 changes: 53 additions & 10 deletions src/net_processing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout.
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 2ms;
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
Expand Down Expand Up @@ -2084,14 +2084,47 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
nodestate->m_last_block_announcement = GetTime();
}

if (nCount == MAX_HEADERS_RESULTS && !all_duplicate) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
// from there instead.
// HOWEVER, if all headers we got this time were duplicates that we already had, don't ask for any more.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
uint64_t headers_ahead = pindexLast->nHeight - m_chainman.ActiveHeight();
bool got_enough_headers = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer);

// If a peer gives us as many headers as possible, this is implicitly a signal that the
// peer has more headers to send us. In Bitcoin Core, the node always asks for more
// headers at this point. Our logic is slightly more complex, because:
// (1) There is an apparent bug in the Bitcoin Core state machine here, where we can
// end up downloading headers from lots of peers at the same time by accident, which we
// work around rather than truly fix;
// (2) For various reasons we may want to avoid letting the header downloads get "too
far ahead" of block downloads, so we may pause syncing for that reason.
if (nCount == MAX_HEADERS_RESULTS) {
if (all_duplicate && !nodestate->fSyncStarted) {
// In this case two things are true:
// 1) This node's most recent batch of headers only included ones we already had.
// 2) We don't have this node marked as a peer to header-sync from.
// This happens when some exogenous event, like an INV of a new block, causes us
// to ask a peer for an unbounded number of headers, when we're already in the
// process of downloading the headers from a different peer.
// In this case the right thing to do is simply stop syncing headers from this
// peer; it's redundant. Here we do nothing; since we don't ask the peer for
// more headers, it will stop sending them.
} else if (got_enough_headers) {
// If we're trying to save memory on headers, and we've already got plenty of headers,
// pause until we're ready for more.
LogPrint(BCLog::NET, "Pausing header sync from peer=%d, because the last one was too far ahead of block sync (%d >> %d)\n", pfrom.GetId(), pindexLast->nHeight, m_chainman.ActiveHeight());
if (nodestate->fSyncStarted) {
// Cancel sync from this node, so we don't penalize it later.
// This will cause us to automatically start syncing from a different node (or restart syncing from the same node) later,
// if we still need to sync headers.
nSyncStarted--;
nodestate->fSyncStarted = false;
nodestate->m_headers_sync_timeout = 0us;
}
} else {
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
// from there instead.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
}
}

// If this set of headers is valid and ends in a block with at least as
Expand Down Expand Up @@ -3133,10 +3166,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
{
if (pindex->trimmed()) {
// For simplicity, if any of the headers they're asking for are trimmed,
// just drop the request.
LogPrint(BCLog::NET, "%s: ignoring getheaders from peer=%i which would return at least one trimmed header\n", __func__, pfrom.GetId());
return;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is for the GETHEADERS message.

Does the GETBLOCK message still work fine if the headers are not in memory? I have not tested it. It doesn't appear that this call needs in-memory headers to find blocks from the index, but I just want to confirm.

Copy link
Contributor Author

@gwillen gwillen Jan 25, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I haven't tested this specific case other than by running against the live network. But I systematically checked for references to the trimmed fields, so I should have caught this if it were an issue. Looking at it now, I can see that it never calls GetBlockHeader (which is the usual suspect) or any of the other offending methods -- it calls ReadBlockFromDisk, which is safe.

}
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}

// pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
// if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
Expand Down Expand Up @@ -4476,7 +4516,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pindexBestHeader == nullptr)
pindexBestHeader = m_chainman.ActiveChain().Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
uint64_t headers_ahead = pindexBestHeader->nHeight - m_chainman.ActiveHeight();
// ELEMENTS: Only download if our headers aren't "too far ahead" of our blocks.
bool got_enough_headers = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer);
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex && !got_enough_headers) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
Expand Down
14 changes: 14 additions & 0 deletions src/node/blockstorage.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@ std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
uint64_t nPruneTarget = 0;
bool fTrimHeaders = false;
uint64_t nMustKeepFullHeaders = std::numeric_limits<uint64_t>::max();
uint64_t nHeaderDownloadBuffer = std::numeric_limits<uint64_t>::max();

// TODO make namespace {
RecursiveMutex cs_LastBlockFile;
Expand Down Expand Up @@ -409,6 +412,17 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus
return true;
}

// Read only the header of the block at pindex's disk position.
// Inefficient but simple: loads the entire block from disk via
// ReadBlockFromDisk, then keeps just its header.
// Returns false (leaving `header` untouched) if the block could not be
// read; true on success with `header` filled in.
bool ReadBlockHeaderFromDisk(CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
CBlock full_block;
const bool ok = ReadBlockFromDisk(full_block, pindex, consensusParams);
if (ok) {
header = full_block.GetBlockHeader();
}
return ok;
}

bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
{
FlatFilePos hpos = pos;
Expand Down
Loading