+        //1. we first check if it is managed. If it is not, then exit
+        //2. we check if it is aligned. This should be 99% of accesses. If it is, do an aligned check and leave
+        //3. if it is not, split the check into 3 parts: the pre-alignment bytes, the aligned portion, and the post-alignment bytes
+        //4. The pre-alignment bytes are the unaligned bytes (if any) located in the qword preceding the aligned portion. Perform a specialized check to ensure that the bytes from [start, align(start, 8)) are valid. Here align(start, 8) rounds start up to the next 8-byte boundary.
+        //5. The aligned check covers the portion where both the address and the size are 8-byte aligned. Use check_shadow_aligned to check it.
+        //6. The post-alignment bytes are handled the same way as the pre-alignment bytes, except they sit in the qword following the aligned portion. Use a specialized check to ensure that [end & ~7, end) is valid.
+
         if size == 0 || !self.is_managed(address as *mut c_void) {
-        // if we are not aligned to 8 bytes, we need to check the high bits of the shadow
-        if offset != 0 {
-            let val = (unsafe { (shadow_addr as *const u16).read() }) >> offset;
-            let mask = ((1 << (size % 9)) - 1) as u16;
-            if val & mask != mask {
+        //fast path. most buffers are likely 8 byte aligned in size and address
+        if (address as usize) & 7 == 0 && size & 7 == 0 {
+            return self.check_shadow_aligned(address, size);
+        }
+
+        //slow path. check everything
+        let start_address = address as usize;
+        let end_address = start_address + size;
+
+        //8 byte align the start/end so we can use check_shadow_aligned for the majority of it
+        //in the case of sub-qword accesses (i.e., the entire access is located within 1 qword), aligned_start > aligned_end naturally
+        let aligned_start = (start_address + 7) & !7;
+        let aligned_end = end_address & !7;
+
+        let start_offset = start_address & 7;
+        let end_offset = end_address & 7;
+
+        //if the start is unaligned
+        if start_address != aligned_start {
+            let start_shadow = map_to_shadow!(self, start_address);
+            //the purpose of the min is to account for sub-qword accesses. If the access is sub-qword then size will be less than the distance to the end of the qword
+            let start_mask: u8 = 0xff << std::cmp::min(size, 8 - start_offset);
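The comments at the top of the new code describe splitting an unaligned access into pre-alignment, aligned, and post-alignment pieces. Below is a minimal, standalone sketch of just that range arithmetic, mirroring the (start + 7) & !7 and end & !7 rounding from the diff; split_range is a hypothetical helper, not part of the PR, and the shadow lookups themselves are omitted.

use std::ops::Range;

// Hypothetical helper (not from the PR): split [start, start + size) into the
// pre-alignment, aligned, and post-alignment pieces the comments describe.
fn split_range(start: usize, size: usize) -> (Range<usize>, Range<usize>, Range<usize>) {
    let end = start + size;
    // Round start up and end down to an 8-byte boundary, as in the diff.
    let aligned_start = (start + 7) & !7;
    let aligned_end = end & !7;
    if aligned_start >= aligned_end {
        // Sub-qword access (or one that merely straddles a qword boundary):
        // there is no fully aligned middle portion, so this sketch returns the
        // whole access as a single leading piece.
        return (start..end, 0..0, 0..0);
    }
    (start..aligned_start, aligned_start..aligned_end, aligned_end..end)
}

fn main() {
    // 13-byte access at 0x1003: 5 pre-alignment bytes, 8 aligned bytes, nothing after.
    assert_eq!(
        split_range(0x1003, 13),
        (0x1003..0x1008, 0x1008..0x1010, 0x1010..0x1010)
    );
    // 3-byte access contained in a single qword: only the leading piece exists.
    assert_eq!(split_range(0x1001, 3), (0x1001..0x1004, 0..0, 0..0));
}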
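The min in the start_mask line at the end of the diff counts how many bytes of the access fall inside the leading, partially covered qword. A small standalone illustration of just that arithmetic follows, assuming the bit-per-byte shadow layout the surrounding code suggests; the actual shadow read and comparison from the PR are not reproduced here.

// Hypothetical helper (not from the PR): number of access bytes that land in the
// first, partially covered qword of [start_address, start_address + size).
fn bytes_in_leading_qword(start_address: usize, size: usize) -> usize {
    let start_offset = start_address & 7;
    // Either the remainder of that qword, or the whole access if it is sub-qword.
    std::cmp::min(size, 8 - start_offset)
}

fn main() {
    // 16-byte access starting at offset 3 inside a qword: 5 of its bytes are in that qword.
    assert_eq!(bytes_in_leading_qword(0x1003, 16), 5);
    // Sub-qword access of 2 bytes at offset 3: only those 2 bytes need the special check.
    assert_eq!(bytes_in_leading_qword(0x1003, 2), 2);
    // Assuming one shadow bit per byte, 0xff << n leaves the low n bits clear;
    // those cleared positions correspond to the bytes counted above.
    let n = bytes_in_leading_qword(0x1003, 2);
    assert_eq!(0xffu8 << n, 0b1111_1100);
}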