@@ -8,6 +8,7 @@ use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
 use alloc::borrow::Cow;
 use alloc::sync::Arc;
 use core::fmt;
+use core::iter;
 use core::marker;
 use core::mem::{self, MaybeUninit};
 use core::ptr::NonNull;
@@ -942,6 +943,39 @@ macro_rules! floats {
                 *ptr = self.to_bits().to_le_bytes();
                 Ok(())
             }
+
+            fn store_list<T>(
+                cx: &mut LowerContext<'_, T>,
+                ty: InterfaceType,
+                offset: usize,
+                items: &[Self],
+            ) -> Result<()> {
+                debug_assert!(matches!(ty, InterfaceType::$ty));
+
+                // Double-check that the CM alignment is at least the host's
+                // alignment for this type, which should be true for all
+                // platforms.
+                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());
+
+                // Slice `cx`'s memory to the window that we'll be modifying.
+                // This should all have already been verified in terms of
+                // alignment and sizing, meaning that these assertions here
+                // are not truly necessary but are instead double-checks.
+                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
+                assert!(dst.as_ptr().cast::<Self>().is_aligned());
+
+                // And with all that out of the way, perform the copying loop.
+                // This is not a `copy_from_slice` because endianness needs to
+                // be handled here, but LLVM should pretty easily transform this
+                // into a memcpy on little-endian platforms.
+                // TODO: use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
+                // is stabilized.
+                for (dst, src) in iter::zip(dst.chunks_exact_mut(Self::SIZE32), items) {
+                    let dst: &mut [u8; Self::SIZE32] = dst.try_into().unwrap();
+                    *dst = src.to_le_bytes();
+                }
+                Ok(())
+            }
         }
 
         unsafe impl Lift for $float {
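Why a loop instead of `copy_from_slice`? The canonical ABI fixes list elements in little-endian byte order regardless of host endianness, so each element goes through `to_le_bytes`. As a standalone illustration of the same pattern (not part of this change; `Self` is fixed to `f32`, `Self::SIZE32` to 4, and the function name `store_f32_list` is hypothetical):

use core::iter;

// Lower a slice of f32 values into a little-endian byte buffer,
// mirroring the diff's copy loop with SIZE32 specialized to 4.
fn store_f32_list(dst: &mut [u8], items: &[f32]) {
    assert_eq!(dst.len(), items.len() * 4);
    for (dst, src) in iter::zip(dst.chunks_exact_mut(4), items) {
        // `chunks_exact_mut(4)` guarantees each chunk is exactly
        // 4 bytes, so the array conversion cannot fail.
        let dst: &mut [u8; 4] = dst.try_into().unwrap();
        *dst = src.to_le_bytes();
    }
}

fn main() {
    let mut buf = [0u8; 8];
    store_f32_list(&mut buf, &[1.0, 2.5]);
    assert_eq!(buf[..4], 1.0f32.to_le_bytes());
    assert_eq!(buf[4..], 2.5f32.to_le_bytes());
}

On little-endian hosts the per-element byte order is already correct, so LLVM can collapse this loop into a single memcpy; on big-endian hosts each lane is byte-swapped as it is stored.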
@@ -957,6 +991,27 @@ macro_rules! floats {
                 debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                 Ok($float::from_le_bytes(bytes.try_into().unwrap()))
             }
+
+            fn load_list(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
+                // See comments in `WasmList::get` for the panicking indexing.
+                let byte_size = list.len * mem::size_of::<Self>();
+                let bytes = &cx.memory()[list.ptr..][..byte_size];
+
+                // The canonical ABI requires that everything is aligned to
+                // its own size, so this should be an aligned array.
+                assert!(bytes.as_ptr().cast::<Self>().is_aligned());
+
+                // Copy the resulting slice to a new Vec, handling endianness
+                // in the process.
+                // TODO: use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
+                // is stabilized.
+                Ok(
+                    bytes
+                        .chunks_exact(Self::SIZE32)
+                        .map(|i| $float::from_le_bytes(i.try_into().unwrap()))
+                        .collect()
+                )
+            }
         }
     };)*)
 }
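`load_list` is the inverse direction: it decodes the guest's little-endian bytes back into host values. A matching sketch under the same assumptions (`f32` fixed, hypothetical name `load_f32_list`):

// Lift a little-endian byte buffer back into a Vec<f32>, mirroring
// the diff's `load_list` with SIZE32 specialized to 4.
fn load_f32_list(bytes: &[u8]) -> Vec<f32> {
    assert_eq!(bytes.len() % 4, 0);
    bytes
        .chunks_exact(4)
        .map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
        .collect()
}

fn main() {
    let mut buf = Vec::new();
    for f in [1.0f32, 2.5] {
        buf.extend_from_slice(&f.to_le_bytes());
    }
    assert_eq!(load_f32_list(&buf), [1.0, 2.5]);
}

Note that `from_le_bytes` reads its `[u8; 4]` argument bytewise, so the alignment assertion in the diff is a canonical-ABI double-check rather than a soundness requirement for this copy.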