Improve elements memory usage #1190

Merged · 16 commits · Jan 26, 2023
2 changes: 1 addition & 1 deletion src/block_proof.cpp
@@ -13,7 +13,7 @@
bool CheckChallenge(const CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params)
{
if (g_signed_blocks) {
return block.proof.challenge == indexLast.proof.challenge;
return block.proof.challenge == indexLast.get_proof().challenge;
} else {
return block.nBits == GetNextWorkRequired(&indexLast, &block, params);
}
79 changes: 64 additions & 15 deletions src/chain.h
@@ -192,10 +192,51 @@ class CBlockIndex
uint32_t nTime{0};
uint32_t nBits{0};
uint32_t nNonce{0};
CProof proof{};

protected:
std::optional<CProof> proof{};
// Dynamic federation fields
DynaFedParams dynafed_params{};
CScriptWitness m_signblock_witness{};
std::optional<DynaFedParams> m_dynafed_params{};
std::optional<CScriptWitness> m_signblock_witness{};

bool m_trimmed{false};

friend class CBlockTreeDB;

public:

// Irrevocably remove blocksigning and dynafed-related stuff from this
// in-memory copy of the block header.
void trim() {
assert_untrimmed();
m_trimmed = true;
proof = std::nullopt;
m_dynafed_params = std::nullopt;
m_signblock_witness = std::nullopt;
}

bool trimmed() const {
return m_trimmed;
}

void assert_untrimmed() const {
assert(!m_trimmed);
}

const CProof& get_proof() const {
assert_untrimmed();
return proof.value();
}

const DynaFedParams& dynafed_params() const {
assert_untrimmed();
return m_dynafed_params.value();
}

const CScriptWitness& signblock_witness() const {
assert_untrimmed();
return m_signblock_witness.value();
}

//! (memory only) Sequential id assigned to distinguish order in which blocks are received.
int32_t nSequenceId{0};
@@ -214,7 +255,7 @@ class CBlockIndex
nBits{block.nBits},
nNonce{block.nNonce},
proof{block.proof},
dynafed_params{block.m_dynafed_params},
m_dynafed_params{block.m_dynafed_params},
m_signblock_witness{block.m_signblock_witness}
{
}
@@ -239,6 +280,7 @@ class CBlockIndex

CBlockHeader GetBlockHeader() const
{
assert_untrimmed();
CBlockHeader block;
block.nVersion = nVersion;
if (pprev)
@@ -250,9 +292,9 @@
}
block.nBits = nBits;
block.nNonce = nNonce;
block.proof = proof;
block.m_dynafed_params = dynafed_params;
block.m_signblock_witness = m_signblock_witness;
block.proof = proof.value();
block.m_dynafed_params = m_dynafed_params.value();
block.m_signblock_witness = m_signblock_witness.value();
return block;
}

@@ -366,12 +408,12 @@ class CDiskBlockIndex : public CBlockIndex
nVersion = ~CBlockHeader::DYNAFED_HF_MASK & nVersion;
return is_dyna;
} else {
return !dynafed_params.IsNull();
return !dynafed_params().IsNull();
}
}
bool RemoveDynaFedMaskOnSerialize(bool for_read) const {
assert(!for_read);
return !dynafed_params.IsNull();
return !dynafed_params().IsNull();
}

SERIALIZE_METHODS(CDiskBlockIndex, obj)
@@ -394,7 +436,7 @@
READWRITE(obj.nVersion);
} else {
int32_t nVersion = obj.nVersion;
if (!obj.dynafed_params.IsNull()) {
if (!obj.dynafed_params().IsNull()) {
nVersion |= CBlockHeader::DYNAFED_HF_MASK;
}
READWRITE(nVersion);
@@ -404,13 +446,19 @@
READWRITE(obj.hashPrev);
READWRITE(obj.hashMerkleRoot);
READWRITE(obj.nTime);

// Allocate objects in the optional<> fields when reading, since READWRITE will not do this
SER_READ(obj, obj.m_dynafed_params = DynaFedParams());
SER_READ(obj, obj.m_signblock_witness = CScriptWitness());
SER_READ(obj, obj.proof = CProof());

// For compatibility with elements 0.14 based chains
if (g_signed_blocks) {
if (is_dyna) {
READWRITE(obj.dynafed_params);
READWRITE(obj.m_signblock_witness.stack);
READWRITE(obj.m_dynafed_params.value());
READWRITE(obj.m_signblock_witness.value().stack);
} else {
READWRITE(obj.proof);
READWRITE(obj.proof.value());
}
} else {
READWRITE(obj.nBits);
@@ -420,6 +468,7 @@

uint256 GetBlockHash() const
{
assert_untrimmed();
CBlockHeader block;
block.nVersion = nVersion;
block.hashPrevBlock = hashPrev;
@@ -430,8 +479,8 @@
}
block.nBits = nBits;
block.nNonce = nNonce;
block.proof = proof;
block.m_dynafed_params = dynafed_params;
block.proof = proof.value();
block.m_dynafed_params = m_dynafed_params.value();
return block.GetHash();
}

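
The pattern introduced in chain.h above is: fields that trimming can discard move into std::optional<> members, trim() resets them to std::nullopt, and assert-on-access getters replace direct field reads so that any remaining use of a trimmed field aborts loudly instead of returning stale data. Below is a minimal standalone sketch of that idea; the Proof and HeaderIndex names are invented for illustration, and this is not the elements code itself.

#include <cassert>
#include <optional>
#include <vector>

struct Proof {
    std::vector<unsigned char> challenge;  // stand-in for CProof's challenge script
};

class HeaderIndex {
    std::optional<Proof> m_proof{Proof{}};
    bool m_trimmed{false};

public:
    // Irrevocably drop the trimmable fields from this in-memory object.
    void trim() {
        assert(!m_trimmed);
        m_trimmed = true;
        m_proof = std::nullopt;   // releases the owned buffer immediately
    }

    bool trimmed() const { return m_trimmed; }

    // The accessor asserts instead of returning an empty value, so any code
    // path that still relies on a trimmed field fails loudly.
    const Proof& get_proof() const {
        assert(!m_trimmed);
        return m_proof.value();
    }
};

int main() {
    HeaderIndex idx;
    (void)idx.get_proof();    // fine: the header still carries its proof
    idx.trim();               // drop blocksigning data for this old header
    assert(idx.trimmed());
    // (void)idx.get_proof(); // would abort: the field has been trimmed away
    return 0;
}
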
10 changes: 5 additions & 5 deletions src/dynafed.cpp
@@ -15,7 +15,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens
for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) {
const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height);
assert(p_epoch_walk);
const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params.m_proposed;
const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed;
const uint256 proposal_root = proposal.CalculateRoot();
vote_tally[proposal_root]++;
// Short-circuit once 4/5 threshold is reached
@@ -56,13 +56,13 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde
// may be pre-dynafed params
const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height);
assert(p_epoch_start);
if (p_epoch_start->dynafed_params.IsNull()) {
if (p_epoch_start->dynafed_params().IsNull()) {
// We need to construct the "full" current parameters of pre-dynafed
// consensus

// Convert signblockscript to P2WSH
uint256 signblock_witness_program;
CSHA256().Write(p_epoch_start->proof.challenge.data(), p_epoch_start->proof.challenge.size()).Finalize(signblock_witness_program.begin());
CSHA256().Write(p_epoch_start->get_proof().challenge.data(), p_epoch_start->get_proof().challenge.size()).Finalize(signblock_witness_program.begin());
CScript p2wsh_signblock_script = CScript() << OP_0 << ToByteVector(signblock_witness_program);

// Make P2SH-P2WSH-ness of non-dynafed fedpegscript explicit
@@ -75,7 +75,7 @@
// Put them in winning proposal
winning_proposal = DynaFedParamEntry(p2wsh_signblock_script, consensus.max_block_signature_size, sh_wsh_fedpeg_program, consensus.fedpegScript, consensus.first_extension_space);
} else {
winning_proposal = p_epoch_start->dynafed_params.m_current;
winning_proposal = p_epoch_start->dynafed_params().m_current;
}
return winning_proposal;
}
@@ -93,7 +93,7 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre

// Return appropriate format based on epoch age or if we *just* activated
// dynafed via BIP9
if (epoch_age == 0 || pindexPrev->dynafed_params.IsNull()) {
if (epoch_age == 0 || pindexPrev->dynafed_params().IsNull()) {
return entry;
} else {
return DynaFedParamEntry(entry.m_signblockscript, entry.m_signblock_witness_limit, entry.CalculateExtraRoot());
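
For context, the vote-tallying loop visible at the top of this file walks the previous epoch's blocks, counts how many proposed the same parameter root, and short-circuits once the 4/5 threshold is reached. A rough standalone sketch of that counting logic follows; the names and the exact threshold rounding are assumptions for illustration, not the elements implementation.

#include <cstdint>
#include <map>
#include <vector>

using ProposalRoot = uint64_t;  // stand-in for the uint256 proposal root

bool EpochHasWinningProposal(const std::vector<ProposalRoot>& last_epoch_proposals,
                             uint32_t epoch_length) {
    std::map<ProposalRoot, uint32_t> vote_tally;
    // ceil(4/5 * epoch_length); the rounding here is an assumption.
    const uint32_t threshold = (epoch_length * 4 + 4) / 5;
    for (const ProposalRoot& root : last_epoch_proposals) {
        if (++vote_tally[root] >= threshold) {
            return true;  // short-circuit once 4/5 of the epoch voted for one proposal
        }
    }
    return false;
}

int main() {
    std::vector<ProposalRoot> proposals(10, 0xabcd);  // every block proposed the same root
    return EpochHasWinningProposal(proposals, 10) ? 0 : 1;
}
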
28 changes: 27 additions & 1 deletion src/init.cpp
@@ -425,6 +425,7 @@ void SetupServerArgs(ArgsManager& argsman)
hidden_args.emplace_back("-sysperms");
#endif
argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-trim_headers", strprintf("Trim old headers in memory (by default older than 2 epochs), removing blocksigning and dynafed-related fields. Saves memory, but blocks us from serving blocks or headers to peers, and removes trimmed fields from some JSON RPC outputs. (default: false)"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-blockfilterindex=<type>",
strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
@@ -979,6 +980,26 @@ bool AppInitParameterInteraction(const ArgsManager& args)
fPruneMode = true;
}

uint32_t epoch_length = chainparams.GetConsensus().dynamic_epoch_length;
if (epoch_length == std::numeric_limits<uint32_t>::max()) {
// That's the default value, for non-dynafed chains and some tests. Pick a more sensible default here.
epoch_length = 20160;
}

if (args.IsArgSet("-trim_headers")) {
LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but we will be unable to serve as a full P2P peer, and certain header fields may be missing from JSON RPC output.\n");
fTrimHeaders = true;
// This calculation is driven by GetValidFedpegScripts in pegins.cpp, which walks the chain
// back to current epoch start, and then an additional total_valid_epochs on top of that.
// We add one epoch here for the current partial epoch, and then another one for good luck.

nMustKeepFullHeaders = (chainparams.GetConsensus().total_valid_epochs + 2) * epoch_length;
// This is the number of headers we can have in flight downloading at a time, beyond the
// set of blocks we've already validated. Capping this is necessary to keep memory usage
// bounded during IBD.
}
nHeaderDownloadBuffer = epoch_length * 2;

nConnectTimeout = args.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
if (nConnectTimeout <= 0) {
nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
@@ -1690,7 +1711,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)

// if pruning, unset the service bit and perform the initial blockstore prune
// after any wallet rescanning has taken place.
if (fPruneMode) {
if (fPruneMode || fTrimHeaders) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
@@ -1702,6 +1723,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}

if (fTrimHeaders) {
LogPrintf("Unsetting NODE_NETWORK_LIMITED on header trim mode\n");
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK_LIMITED);
}

if (DeploymentEnabled(chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
// Advertise witness capabilities.
// The option to not set NODE_WITNESS is only used in the tests and should be removed.
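
As a rough worked example of the two limits computed in AppInitParameterInteraction above (the numbers are illustrative: the fallback epoch length of 20160 from this file plus an assumed total_valid_epochs of 2, not values taken from the PR), a trimming node would keep full headers for the most recent (2 + 2) * 20160 = 80640 blocks and let header download run at most 2 * 20160 = 40320 headers ahead of block validation:

#include <cstdint>
#include <iostream>

int main() {
    const uint64_t epoch_length = 20160;       // fallback default chosen above
    const uint64_t total_valid_epochs = 2;     // assumed for illustration
    const uint64_t must_keep_full_headers = (total_valid_epochs + 2) * epoch_length;
    const uint64_t header_download_buffer = epoch_length * 2;
    std::cout << "keep full headers for the last " << must_keep_full_headers << " blocks\n"   // 80640
              << "let header sync run " << header_download_buffer << " headers ahead\n";      // 40320
    return 0;
}
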
2 changes: 1 addition & 1 deletion src/miner.cpp
@@ -29,7 +29,7 @@

void ResetChallenge(CBlockHeader& block, const CBlockIndex& indexLast, const Consensus::Params& params)
{
block.proof.challenge = indexLast.proof.challenge;
block.proof.challenge = indexLast.get_proof().challenge;
}

void ResetProof(CBlockHeader& block)
79 changes: 69 additions & 10 deletions src/net_processing.cpp
@@ -48,7 +48,7 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout.
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 2ms;
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@@ -910,6 +910,10 @@ bool PeerManagerImpl::TipMayBeStale()

bool PeerManagerImpl::CanDirectFetch()
{
if(!m_chainman.ActiveChain().Tip()) {
LogPrint(BCLog::NET, "Tried to call CanDirectFetch with no currently-active chain.\n");
return false;
}
return m_chainman.ActiveChain().Tip()->GetBlockTime() > GetAdjustedTime() - m_chainparams.GetConsensus().nPowTargetSpacing * 20;
}

@@ -2007,6 +2011,36 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
return;
}

// If we are already too far ahead of where we want to be on headers, discard
// the received headers. We can still get ahead by up to a single maximum-sized
// headers message here, but never further, so that's fine.
if (pindexBestHeader) {
uint64_t headers_ahead = pindexBestHeader->nHeight - m_chainman.ActiveHeight();
bool too_far_ahead = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer);
if (too_far_ahead) {
LOCK(cs_main);
CNodeState *nodestate = State(pfrom.GetId());
if ((nodestate->pindexBestKnownBlock == nullptr) ||
(nodestate->pindexBestKnownBlock->nHeight < m_chainman.ActiveHeight())) {
// Our notion of what blocks a peer has available is based on its pindexBestKnownBlock,
// which is based on headers received from it. If we don't have one, or it's too old,
// then we can never get blocks from this peer until we accept headers from it first.
LogPrint(BCLog::NET, "NOT discarding headers from peer=%d, to update its block availability. (current best header %d, active chain height %d)\n", pfrom.GetId(), pindexBestHeader->nHeight, m_chainman.ActiveHeight());
} else {
LogPrint(BCLog::NET, "Discarding received headers and pausing header sync from peer=%d, because we are too far ahead of block sync. (%d > %d)\n", pfrom.GetId(), pindexBestHeader->nHeight, m_chainman.ActiveHeight());
if (nodestate->fSyncStarted) {
// Cancel sync from this node, so we don't penalize it later.
// This will cause us to automatically start syncing from a different node (or restart syncing from the same node) later,
// if we still need to sync headers.
nSyncStarted--;
nodestate->fSyncStarted = false;
nodestate->m_headers_sync_timeout = 0us;
}
return;
}
}
}

bool received_new_header = false;
const CBlockIndex *pindexLast = nullptr;
{
@@ -2084,14 +2118,29 @@
nodestate->m_last_block_announcement = GetTime();
}

if (nCount == MAX_HEADERS_RESULTS && !all_duplicate) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
// from there instead.
// HOWEVER, if all headers we got this time were duplicates that we already had, don't ask for any more.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
// If a peer gives us as many headers as possible, this is implicitly a signal that the
// peer has more headers to send us. In Bitcoin Core, the node always asks for more
// headers at this point. Our logic is slightly more complex, to work around an apparent
// bug in the Bitcoin Core state machine, where we can end up downloading headers from
// lots of peers at the same time by accident.
if (nCount == MAX_HEADERS_RESULTS) {
if (all_duplicate && !nodestate->fSyncStarted) {
// In this case two things are true:
// 1) This node's most recent batch of headers only included ones we already had.
// 2) We don't have this node marked as a peer to header-sync from.
// This happens when some exogenous event, like an INV of a new block, causes us
// to ask a peer for an unbounded number of headers, when we're already in the
// process of downloading the headers from a different peer.
// In this case the right thing to do is simply stop syncing headers from this
// peer; it's redundant. Here we do nothing; since we don't ask the peer for
// more headers, it will stop sending them.
} else {
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
// from there instead.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
}
}

// If this set of headers is valid and ends in a block with at least as
@@ -3133,10 +3182,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
{
if (pindex->trimmed()) {
// For simplicity, if any of the headers they're asking for are trimmed,
// just drop the request.
LogPrint(BCLog::NET, "%s: ignoring getheaders from peer=%i which would return at least one trimmed header\n", __func__, pfrom.GetId());
return;

Collaborator commented:

This is for the GETHEADERS message.

Does the GETBLOCK message still work fine if the headers are not in memory? I have not tested it. It doesn't appear that this call needs in-memory headers to find blocks from the index, but I just want to confirm.


@gwillen (Contributor, Author) replied on Jan 25, 2023:

I haven't tested this specific case other than by running against the live network. But I systematically checked for references to the trimmed fields, so I should have caught this if it were an issue. Looking at it now, I can see that it never calls GetBlockHeader (which is the usual suspect) or any of the other offending methods -- it calls ReadBlockFromDisk, which is safe.

}
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}

// pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
// if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
@@ -4476,7 +4532,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pindexBestHeader == nullptr)
pindexBestHeader = m_chainman.ActiveChain().Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
uint64_t headers_ahead = pindexBestHeader->nHeight - m_chainman.ActiveHeight();
// ELEMENTS: Only download if our headers aren't "too far ahead" of our blocks.
bool got_enough_headers = fTrimHeaders && (headers_ahead >= nHeaderDownloadBuffer);
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex && !got_enough_headers) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
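
Taken together, the net_processing changes make a trimming node throttle header sync so that pindexBestHeader never runs more than nHeaderDownloadBuffer ahead of the validated chain, while still accepting headers from peers whose block availability we would otherwise never learn. The sketch below condenses that decision into one function; it is an illustration only, with invented names (PeerView, ShouldDiscardHeaders), and omits the locking and sync-state bookkeeping the real code performs.

#include <cassert>
#include <cstdint>

// What we know about a peer's available blocks (learned from its headers).
struct PeerView {
    bool has_best_known_block = false;    // do we know of any block this peer has?
    int64_t best_known_block_height = -1;
};

// Decide whether to discard an incoming headers batch from this peer.
bool ShouldDiscardHeaders(bool trim_headers_enabled,
                          int64_t best_header_height,
                          int64_t active_chain_height,
                          uint64_t header_download_buffer,
                          const PeerView& peer) {
    const uint64_t headers_ahead = best_header_height > active_chain_height
        ? static_cast<uint64_t>(best_header_height - active_chain_height) : 0;
    if (!trim_headers_enabled || headers_ahead < header_download_buffer) {
        return false;  // not too far ahead of block validation: accept as usual
    }
    // Too far ahead. Still accept if we need this peer's headers just to learn
    // which blocks it can serve us; otherwise discard and pause sync from it.
    if (!peer.has_best_known_block || peer.best_known_block_height < active_chain_height) {
        return false;
    }
    return true;
}

int main() {
    PeerView peer{true, 120000};
    // 160000 headers known vs 100000 blocks validated, with a 40320-header buffer:
    assert(ShouldDiscardHeaders(true, 160000, 100000, 40320, peer));
    // Without -trim_headers the throttle never engages.
    assert(!ShouldDiscardHeaders(false, 160000, 100000, 40320, peer));
    return 0;
}
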