Skip to content

Commit 051bc74

Browse files
authored
Make GC triggering and heap resizing consistent (mmtk#1266)
This PR fixes a bug where the MemBalancer did not increase the heap size enough to accommodate the amount of side metadata needed by the pending allocation. It manifested as looping infinitely between triggering GC and (not actually) resizing the heap size after a GC when the minimum heap size is too small. Now it always includes the side metadata amount when increasing heap size. This PR also refactors the calculation of "shifting right and rounding up" which is used in multiple places. We also replace `alloc_rshift` with `log_data_meta_ratio` for two reasons. (1) The previous implementation would cause unsigned overflow before converting the result to `i32`. (2) `log_data_meta_ratio` has clearer semantics.
1 parent c61e6c8 commit 051bc74

File tree

6 files changed

+75
-14
lines changed

6 files changed

+75
-14
lines changed

src/policy/lockfreeimmortalspace.rs

+5-1
Original file line numberDiff line numberDiff line change
@@ -129,10 +129,14 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
129129
unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
130130
}
131131

132+
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
133+
self.metadata.calculate_reserved_pages(data_pages)
134+
}
135+
132136
fn reserved_pages(&self) -> usize {
133137
let cursor = self.cursor.load(Ordering::Relaxed);
134138
let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
135-
let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
139+
let meta_pages = self.estimate_side_meta_pages(data_pages);
136140
data_pages + meta_pages
137141
}
138142

src/policy/marksweepspace/malloc_ms/global.rs

+5-1
Original file line numberDiff line numberDiff line change
@@ -215,14 +215,18 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
215215
"MallocSpace"
216216
}
217217

218+
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
219+
self.metadata.calculate_reserved_pages(data_pages)
220+
}
221+
218222
#[allow(clippy::assertions_on_constants)]
219223
fn reserved_pages(&self) -> usize {
220224
use crate::util::constants::LOG_BYTES_IN_PAGE;
221225
// Assume malloc pages are no smaller than 4K pages. Otherwise the subtraction below will fail.
222226
debug_assert!(LOG_BYTES_IN_MALLOC_PAGE >= LOG_BYTES_IN_PAGE);
223227
let data_pages = self.active_pages.load(Ordering::SeqCst)
224228
<< (LOG_BYTES_IN_MALLOC_PAGE - LOG_BYTES_IN_PAGE);
225-
let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
229+
let meta_pages = self.estimate_side_meta_pages(data_pages);
226230
data_pages + meta_pages
227231
}
228232

src/policy/space.rs

+16-2
Original file line numberDiff line numberDiff line change
@@ -109,9 +109,12 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
109109

110110
// Clear the request, and inform GC trigger about the pending allocation.
111111
pr.clear_request(pages_reserved);
112+
113+
let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
114+
let total_pages_reserved = pages_reserved + meta_pages_reserved;
112115
self.get_gc_trigger()
113116
.policy
114-
.on_pending_allocation(pages_reserved);
117+
.on_pending_allocation(total_pages_reserved);
115118

116119
VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
117120
unsafe { Address::zero() }
@@ -313,9 +316,20 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
313316
.mark_as_mapped(self.common().start, self.common().extent);
314317
}
315318

319+
/// Estimate the amount of side metadata memory needed for a given data memory size in pages. The
320+
/// result will over-estimate the amount of metadata pages needed, with at least one page per
321+
/// side metadata. This relatively accurately describes the number of side metadata pages the
322+
/// space actually consumes.
323+
///
324+
/// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
325+
/// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
326+
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
327+
self.common().metadata.calculate_reserved_pages(data_pages)
328+
}
329+
316330
fn reserved_pages(&self) -> usize {
317331
let data_pages = self.get_page_resource().reserved_pages();
318-
let meta_pages = self.common().metadata.calculate_reserved_pages(data_pages);
332+
let meta_pages = self.estimate_side_meta_pages(data_pages);
319333
data_pages + meta_pages
320334
}
321335

src/util/conversions.rs

+9
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,15 @@ pub fn bytes_to_formatted_string(bytes: usize) -> String {
9494
format!("{}{}", num, UNITS.last().unwrap())
9595
}
9696

97+
/// Shift `num` by `bits` to the right. Add 1 to the result if any `1` bits are shifted out to the
98+
/// right. This is equivalent to dividing `num` by 2 to the power of `bits`, rounding up.
99+
///
100+
/// This function has undefined behavior if `bits` is greater than or equal to the number of bits in
101+
/// `usize`.
102+
pub const fn rshift_align_up(num: usize, bits: usize) -> usize {
103+
(num + ((1 << bits) - 1)) >> bits
104+
}
105+
97106
#[cfg(test)]
98107
mod tests {
99108
use crate::util::conversions::*;

src/util/metadata/side_metadata/global.rs

+5-4
Original file line numberDiff line numberDiff line change
@@ -1362,12 +1362,13 @@ impl SideMetadataContext {
13621362
pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
13631363
let mut total = 0;
13641364
for spec in self.global.iter() {
1365-
let rshift = addr_rshift(spec);
1366-
total += (data_pages + ((1 << rshift) - 1)) >> rshift;
1365+
// This rounds up. No matter how small `data_pages` is, the side metadata size will be
1366+
// at least one page. This behavior is *intended*. This over-estimated amount is used
1367+
// for triggering GC and resizing the heap.
1368+
total += data_to_meta_size_round_up(spec, data_pages);
13671369
}
13681370
for spec in self.local.iter() {
1369-
let rshift = addr_rshift(spec);
1370-
total += (data_pages + ((1 << rshift) - 1)) >> rshift;
1371+
total += data_to_meta_size_round_up(spec, data_pages);
13711372
}
13721373
total
13731374
}

src/util/metadata/side_metadata/helpers.rs

+35-6
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ use super::ranges::BitOffset;
22
use super::SideMetadataSpec;
33
use crate::util::constants::LOG_BYTES_IN_PAGE;
44
use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE};
5+
use crate::util::conversions::rshift_align_up;
56
use crate::util::heap::layout::vm_layout::VMLayout;
67
use crate::util::memory::{MmapAnnotation, MmapStrategy};
78
#[cfg(target_pointer_width = "32")]
@@ -110,7 +111,7 @@ pub(crate) fn ensure_munmap_contiguous_metadata_space(
110111
let metadata_start = address_to_meta_address(spec, start);
111112
let mmap_start = metadata_start.align_down(BYTES_IN_PAGE);
112113
// nearest page-aligned ending address
113-
let metadata_size = (size + ((1 << addr_rshift(spec)) - 1)) >> addr_rshift(spec);
114+
let metadata_size = data_to_meta_size_round_up(spec, size);
114115
let mmap_size = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE) - mmap_start;
115116
if mmap_size > 0 {
116117
ensure_munmap_metadata(mmap_start, mmap_size);
@@ -135,7 +136,7 @@ pub(super) fn try_mmap_contiguous_metadata_space(
135136
let metadata_start = address_to_meta_address(spec, start);
136137
let mmap_start = metadata_start.align_down(BYTES_IN_PAGE);
137138
// nearest page-aligned ending address
138-
let metadata_size = (size + ((1 << addr_rshift(spec)) - 1)) >> addr_rshift(spec);
139+
let metadata_size = data_to_meta_size_round_up(spec, size);
139140
let mmap_size = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE) - mmap_start;
140141
if mmap_size > 0 {
141142
if !no_reserve {
@@ -185,14 +186,42 @@ pub(crate) fn address_to_meta_address(
185186
res
186187
}
187188

188-
pub(super) const fn addr_rshift(metadata_spec: &SideMetadataSpec) -> i32 {
189-
((LOG_BITS_IN_BYTE as usize) + metadata_spec.log_bytes_in_region
190-
- (metadata_spec.log_num_of_bits)) as i32
189+
/// Return the base-2 logarithm of the ratio of data bits and metadata bits per region.
190+
///
191+
/// Suppose a memory region has `data_bits` bits of data, and `meta_bits` bits of metadata for
192+
/// `metadata_spec`, and the result of `log_data_meta_ratio(metadata_spec)` is `shift`, then
193+
///
194+
/// - `data_bits >> shift == meta_bits`
195+
/// - `meta_bits << shift == data_bits`
196+
pub(super) const fn log_data_meta_ratio(metadata_spec: &SideMetadataSpec) -> usize {
197+
let log_data_bits_in_region = (LOG_BITS_IN_BYTE as usize) + metadata_spec.log_bytes_in_region;
198+
let log_meta_bits_in_region = metadata_spec.log_num_of_bits;
199+
200+
// TODO: In theory, it is possible to construct a side metadata that has more metadata bits than
201+
// data bits per region. But such pathological side metadata consumes way too much memory, and
202+
// should never be used in any useful applications. It should be forbidden.
203+
log_data_bits_in_region - log_meta_bits_in_region
204+
}
205+
206+
/// Calculate the amount of metadata needed for the given amount of data memory, rounded up to the nearest
207+
/// integer. `data_size` can be in any unit, e.g. bits, bytes, pages, blocks, chunks, etc., and the
208+
/// result has the same unit.
209+
pub(super) const fn data_to_meta_size_round_up(
210+
metadata_spec: &SideMetadataSpec,
211+
data_size: usize,
212+
) -> usize {
213+
rshift_align_up(data_size, log_data_meta_ratio(metadata_spec))
214+
}
215+
216+
/// Calculate the amount of data governed by the given amount of metadata. `meta_size` can be in any
217+
/// unit, e.g. bits, bytes, pages, blocks, chunks, etc., and the result has the same unit.
218+
pub(super) const fn meta_to_data_size(metadata_spec: &SideMetadataSpec, meta_size: usize) -> usize {
219+
meta_size << log_data_meta_ratio(metadata_spec)
191220
}
192221

193222
#[allow(dead_code)]
194223
pub(super) const fn metadata_address_range_size(metadata_spec: &SideMetadataSpec) -> usize {
195-
1usize << (VMLayout::LOG_ARCH_ADDRESS_SPACE - addr_rshift(metadata_spec) as usize)
224+
1usize << (VMLayout::LOG_ARCH_ADDRESS_SPACE - log_data_meta_ratio(metadata_spec))
196225
}
197226

198227
pub(super) fn meta_byte_lshift(metadata_spec: &SideMetadataSpec, data_addr: Address) -> u8 {

0 commit comments

Comments
 (0)