
Commit 4a2b620

Frida windows check shadow fix (#2159)
* Fix check_shadow and add additional tests
* Add some additional documentation
1 parent 1b30874 commit 4a2b620

1 file changed: +94, -44 lines changed

libafl_frida/src/alloc.rs

@@ -434,67 +434,107 @@ impl Allocator {
         (shadow_mapping_start, (end - start) / 8 + 1)
     }
 
+    #[inline]
+    #[must_use]
+    fn check_shadow_aligned(&mut self, address: *const c_void, size: usize) -> bool {
+        assert_eq!((address as usize) & 7, 0, "check_shadow_aligned used when address is not aligned. Use check_shadow");
+        assert_eq!(size & 7, 0, "check_shadow_aligned used when size is not aligned. Use check_shadow");
+
+        if size == 0 {
+            return true;
+        }
+
+        let shadow_addr = map_to_shadow!(self, (address as usize));
+        let shadow_size = size >> 3;
+        let buf =
+            unsafe { std::slice::from_raw_parts_mut(shadow_addr as *mut u8, shadow_size) };
+        let (prefix, aligned, suffix) = unsafe { buf.align_to::<u128>() };
+        if !prefix.iter().all(|&x| x == 0xff)
+            || !suffix.iter().all(|&x| x == 0xff)
+            || !aligned
+                .iter()
+                .all(|&x| x == 0xffffffffffffffffffffffffffffffffu128)
+        {
+            return false;
+        }
+
+        return true;
+    }
     /// Checks whether the given address up till size is valid unpoisoned shadow memory.
     /// TODO: check edge cases
     #[inline]
     #[must_use]
     pub fn check_shadow(&mut self, address: *const c_void, size: usize) -> bool {
+        // The algorithm for check_shadow is as follows:
+        // 1. Check whether the address is managed; if it is not, exit early.
+        // 2. Check whether the access is aligned; this should be 99% of accesses. If it is, do an aligned check and return.
+        // 3. If it is not, split the check into 3 parts: the pre-alignment bytes, the aligned portion, and the post-alignment bytes.
+        // 4. The pre-alignment bytes are the unaligned bytes (if any) located in the qword preceding the aligned portion. Perform a specialized check to ensure that the bytes from [start, align(start, 8)) are valid, where align(start, 8) rounds start up to the next 8-byte boundary.
+        // 5. The aligned portion is where both the address and the size are 8-byte aligned. Use check_shadow_aligned to check it.
+        // 6. The post-alignment bytes are handled like the pre-alignment bytes, except they live in the qword following the aligned portion. Use a specialized check to ensure that [end & !7, end) is valid.
+
         if size == 0 || !self.is_managed(address as *mut c_void) {
             return true;
         }
-        let address = address as usize;
-        let shadow_size = size / 8;
-
-        let shadow_addr = map_to_shadow!(self, address);
-
-        // self.map_shadow_for_region(address, address + size, false);
 
-        log::info!(
-            "check_shadow: {:x}, {:x}, {:x}, {:x}",
-            address,
-            shadow_size,
-            shadow_addr,
+        log::trace!(
+            "check_shadow: {:x}, {:x}",
+            address as usize,
             size
         );
 
-        let offset = address & 7;
-        // if we are not aligned to 8 bytes, we need to check the high bits of the shadow
-        if offset != 0 {
-            let val = (unsafe { (shadow_addr as *const u16).read() }) >> offset;
-            let mask = ((1 << (size % 9)) - 1) as u16;
-            if val & mask != mask {
+        // Fast path: most buffers are likely 8-byte aligned in both size and address.
+        if (address as usize) & 7 == 0 && size & 7 == 0 {
+            return self.check_shadow_aligned(address, size);
+        }
+
+        // Slow path: check everything.
+        let start_address = address as usize;
+        let end_address = start_address + size;
+
+        // 8-byte align the start/end so we can use check_shadow_aligned for the majority of it.
+        // In the case of sub-qword accesses (i.e., the entire access is located within 1 qword), aligned_start > aligned_end naturally.
+        let aligned_start = (start_address + 7) & !7;
+        let aligned_end = end_address & !7;
+
+        let start_offset = start_address & 7;
+        let end_offset = end_address & 7;
+
+        // If the start is unaligned:
+        if start_address != aligned_start {
+            let start_shadow = map_to_shadow!(self, start_address);
+            // The min accounts for sub-qword accesses: if the access is sub-qword, size is less than the distance to the end of the qword.
+            let start_mask: u8 = 0xff << std::cmp::min(size, 8 - start_offset);
+            if unsafe { (start_shadow as *const u8).read() } & start_mask != start_mask {
                 return false;
             }
         }
 
-        if size >= 8 {
-            let buf =
-                unsafe { std::slice::from_raw_parts_mut(shadow_addr as *mut u8, shadow_size) };
-            let (prefix, aligned, suffix) = unsafe { buf.align_to::<u128>() };
-            if prefix.iter().all(|&x| x == 0xff)
-                && suffix.iter().all(|&x| x == 0xff)
-                && aligned
-                    .iter()
-                    .all(|&x| x == 0xffffffffffffffffffffffffffffffffu128)
-            {
-                if size % 8 != 0 {
-                    let val = unsafe { ((shadow_addr + shadow_size) as *mut u8).read() };
-                    let mask = (((1 << (size % 8)) - 1) as u8).rotate_left(8 - (size % 8) as u32);
-                    if val & mask != mask {
-                        return false;
-                    }
-                }
-                return true;
+        // If this does not hold, it must be a sub-qword access, as the start will be larger than the end.
+        if aligned_start <= aligned_end {
+            if !self.check_shadow_aligned(aligned_start as *const c_void, aligned_end - aligned_start) {
+                return false;
             }
-        }
-        if size % 8 != 0 {
-            let val = unsafe { ((shadow_addr + shadow_size) as *mut u8).read() };
-            let mask = (((1 << (size % 8)) - 1) as u8).rotate_left(8 - (size % 8) as u32);
-            if val & mask == mask {
-                return true;
+
+            // If the end is unaligned, check from the beginning of its qword up to the offset.
+            if end_address != aligned_end {
+                let end_shadow = map_to_shadow!(self, end_address);
+                let end_mask = 0xff << (8 - end_offset);
+                if unsafe { (end_shadow as *const u8).read() } & end_mask != end_mask {
+                    return false;
+                }
             }
         }
-        return false;
+        // self.map_shadow_for_region(address, address + size, false);
+
+        return true;
     }
     /// Maps the address to a shadow address
     #[inline]
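
To make the slow path's decomposition concrete: an unaligned access is split into pre-alignment bytes, an aligned middle that check_shadow_aligned scans (comparing 16 shadow bytes at a time via slice::align_to::<u128>), and post-alignment bytes. The sketch below reproduces only the alignment arithmetic and mask construction on hypothetical values (start 0x1003, size 18); it does not touch real shadow memory or the LibAFL API.

```rust
// Standalone sketch of the qword decomposition in check_shadow's slow path.
// Addresses and sizes are hypothetical; no shadow memory is read.
fn main() {
    let start_address: usize = 0x1003; // unaligned start
    let size: usize = 18;
    let end_address = start_address + size; // 0x1015, unaligned end

    // Round the start up and the end down to the nearest 8-byte boundary.
    let aligned_start = (start_address + 7) & !7; // 0x1008
    let aligned_end = end_address & !7; // 0x1010

    let start_offset = start_address & 7; // 3
    let end_offset = end_address & 7; // 5

    // Pre-alignment bytes:  [0x1003, 0x1008) -> 5 bytes
    // Aligned portion:      [0x1008, 0x1010) -> 8 bytes (check_shadow_aligned)
    // Post-alignment bytes: [0x1010, 0x1015) -> 5 bytes
    assert_eq!(aligned_start - start_address, 5);
    assert_eq!(aligned_end - aligned_start, 8);
    assert_eq!(end_address - aligned_end, 5);

    // Shadow masks, mirroring the expressions used in check_shadow.
    let start_mask: u8 = 0xff << std::cmp::min(size, 8 - start_offset);
    let end_mask: u8 = 0xff << (8 - end_offset);
    assert_eq!(start_mask, 0b1110_0000);
    assert_eq!(end_mask, 0b1111_1000);
    println!("decomposition: 5 + 8 + 5 bytes; masks {start_mask:#010b}, {end_mask:#010b}");
}
```

For a sub-qword access the same arithmetic naturally yields aligned_start > aligned_end, so the aligned and post-alignment checks are skipped and only the start mask (capped by min(size, ...)) is applied.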
@@ -570,10 +610,9 @@ impl Allocator {
         let start = range.memory_range().base_address().0 as usize;
         let end = start + range.memory_range().size();
         log::trace!(
-            "Start: {:#x}, end: {:#x}, prot: {:?}",
+            "Start: {:#x}, end: {:#x}",
             start,
             end,
-            range.protection()
         );
         occupied_ranges.push((start, end));
         let base: usize = 2;
@@ -743,6 +782,17 @@ fn check_shadow() {
     assert!(allocator.check_shadow(unsafe { allocation.offset(1) }, 8) == false);
     assert!(allocator.check_shadow(unsafe { allocation.offset(2) }, 8) == false);
     assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 8) == false);
+    let allocation = unsafe { allocator.alloc(0xc, 0) };
+    assert!(allocator.check_shadow(unsafe { allocation.offset(4) }, 8) == true);
+    // subqword access
+    assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 2) == true);
+    // unaligned access
+    assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 8) == true);
+    let allocation = unsafe { allocator.alloc(0x20, 0) };
+    // access with unaligned parts at the beginning and end
+    assert!(allocator.check_shadow(unsafe { allocation.offset(10) }, 21) == true);
+    // invalid, unaligned access
+    assert!(allocator.check_shadow(unsafe { allocation.offset(10) }, 29) == false);
     let allocation = unsafe { allocator.alloc(4, 0) };
     assert!(allocator.check_shadow(allocation, 1) == true);
     assert!(allocator.check_shadow(allocation, 2) == true);
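
As a rough mental model of what the new assertions exercise, here is a toy shadow checker. It assumes one shadow byte tracks eight application bytes with one bit per byte; the bit ordering is an illustrative choice, not necessarily the one LibAFL uses, and toy_check_shadow is a hypothetical helper for this sketch only.

```rust
// Toy byte-granular shadow model: a set bit means the application byte is
// accessible. Illustration only; this is not the LibAFL implementation.
fn toy_check_shadow(shadow: &[u8], start: usize, size: usize) -> bool {
    (start..start + size).all(|addr| shadow[addr / 8] & (1 << (addr % 8)) != 0)
}

fn main() {
    // Mark bytes [8, 20) accessible, mimicking an allocation of 0xc bytes.
    let mut shadow = vec![0u8; 8];
    for addr in 8..20 {
        shadow[addr / 8] |= 1 << (addr % 8);
    }

    assert!(toy_check_shadow(&shadow, 12, 8)); // aligned access inside the allocation
    assert!(toy_check_shadow(&shadow, 11, 2)); // sub-qword access
    assert!(toy_check_shadow(&shadow, 11, 8)); // unaligned but in bounds
    assert!(!toy_check_shadow(&shadow, 14, 8)); // runs past the end: invalid
    println!("toy shadow model matches the test expectations");
}
```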
