@@ -15,7 +15,7 @@ use crate::buf::IntoIter;
 #[allow(unused)]
 use crate::loom::sync::atomic::AtomicMut;
 use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
-use crate::Buf;
+use crate::{Buf, BytesMut};
 
 /// A cheaply cloneable and sliceable chunk of contiguous memory.
 ///
@@ -113,6 +113,7 @@ pub(crate) struct Vtable {
     ///
     /// takes `Bytes` to value
     pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
     /// fn(data)
     pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
     /// fn(data, ptr, len)
@@ -507,6 +508,49 @@ impl Bytes {
         self.truncate(0);
     }
 
+    /// Try to convert `self` into `BytesMut`.
+    ///
+    /// If `self` is unique for the entire original buffer, this will succeed
+    /// and return a `BytesMut` with the contents of `self` without copying.
+    /// If `self` is not unique for the entire original buffer, this will fail
+    /// and return `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Bytes, BytesMut};
+    ///
+    /// let bytes = Bytes::from(b"hello".to_vec());
+    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
+    /// ```
+    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
+        if self.is_unique() {
+            Ok(self.make_mut())
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Convert `self` into `BytesMut`.
+    ///
+    /// If `self` is unique for the entire original buffer, this will return a
+    /// `BytesMut` with the contents of `self` without copying.
+    /// If `self` is not unique for the entire original buffer, this will copy
+    /// `self`'s view of the original buffer into a new `BytesMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Bytes, BytesMut};
+    ///
+    /// let bytes = Bytes::from(b"hello".to_vec());
+    /// assert_eq!(bytes.make_mut(), BytesMut::from(&b"hello"[..]));
+    /// ```
+    pub fn make_mut(self) -> BytesMut {
+        let bytes = ManuallyDrop::new(self);
+        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
+    }
+
     #[inline]
     pub(crate) unsafe fn with_vtable(
         ptr: *const u8,
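For illustration, a minimal sketch of how the two new methods compose from the caller's side (the helper `into_writable` is hypothetical, not part of this change): try the zero-copy conversion first, and fall back to copying just the viewed bytes when the buffer is shared.

```rust
use bytes::{BufMut, Bytes, BytesMut};

// Hypothetical helper: reuse the allocation when possible, copy otherwise.
fn into_writable(b: Bytes) -> BytesMut {
    match b.try_into_mut() {
        // Unique handle: the original allocation is reused, no copy.
        Ok(m) => m,
        // Shared handle: copy only the bytes this view covers.
        Err(shared) => {
            let mut m = BytesMut::with_capacity(shared.len());
            m.extend_from_slice(&shared[..]);
            m
        }
    }
}

fn main() {
    let mut m = into_writable(Bytes::from(b"hello".to_vec()));
    m.put_slice(b" world");
    assert_eq!(&m[..], b"hello world");
}
```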
@@ -917,6 +961,7 @@ impl fmt::Debug for Vtable {
 const STATIC_VTABLE: Vtable = Vtable {
     clone: static_clone,
     to_vec: static_to_vec,
+    to_mut: static_to_mut,
     is_unique: static_is_unique,
     drop: static_drop,
 };
@@ -931,6 +976,11 @@ unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8
     slice.to_vec()
 }
 
+unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    let slice = slice::from_raw_parts(ptr, len);
+    BytesMut::from(slice)
+}
+
 fn static_is_unique(_: &AtomicPtr<()>) -> bool {
     false
 }
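As an illustrative aside (a behavior sketch assuming the `try_into_mut`/`make_mut` API added in this diff): a `Bytes` backed by static storage is never considered unique, so the fallible conversion is refused and `static_to_mut` always copies.

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    // `static_is_unique` returns false, so the zero-copy path is never taken.
    assert!(Bytes::from_static(b"hello").try_into_mut().is_err());

    // `make_mut` still works: `static_to_mut` copies the static slice into a
    // fresh allocation owned by the returned `BytesMut`.
    let m = Bytes::from_static(b"hello").make_mut();
    assert_eq!(m, BytesMut::from(&b"hello"[..]));
}
```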
@@ -944,13 +994,15 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
 static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
     clone: promotable_even_clone,
     to_vec: promotable_even_to_vec,
+    to_mut: promotable_even_to_mut,
     is_unique: promotable_is_unique,
     drop: promotable_even_drop,
 };
 
 static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
     clone: promotable_odd_clone,
     to_vec: promotable_odd_to_vec,
+    to_mut: promotable_odd_to_mut,
     is_unique: promotable_is_unique,
     drop: promotable_odd_drop,
 };
@@ -994,12 +1046,47 @@ unsafe fn promotable_to_vec(
     }
 }
 
+unsafe fn promotable_to_mut(
+    data: &AtomicPtr<()>,
+    ptr: *const u8,
+    len: usize,
+    f: fn(*mut ()) -> *mut u8,
+) -> BytesMut {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shared_to_mut_impl(shared.cast(), ptr, len)
+    } else {
+        // KIND_VEC is a view of an underlying buffer at a certain offset.
+        // `ptr + len` always marks the end of that buffer, because a KIND_VEC
+        // handle is promoted to KIND_ARC before it is ever truncated.
+        // Thus, we can safely reconstruct the Vec from it without leaking memory.
+        debug_assert_eq!(kind, KIND_VEC);
+
+        let buf = f(shared);
+        let off = offset_from(ptr, buf);
+        let cap = off + len;
+        let v = Vec::from_raw_parts(buf, cap, cap);
+
+        let mut b = BytesMut::from_vec(v);
+        b.advance_unchecked(off);
+        b
+    }
+}
+
 unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
     promotable_to_vec(data, ptr, len, |shared| {
         ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
     })
 }
 
+unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    promotable_to_mut(data, ptr, len, |shared| {
+        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+    })
+}
+
 unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
     data.with_mut(|shared| {
         let shared = *shared;
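A behavior sketch of the KIND_VEC branch above (assuming `Bytes::from(Vec<u8>)` goes through the promotable vtable, as the surrounding code suggests): even after the view has been advanced, the whole original allocation is rebuilt and the returned `BytesMut` is advanced past the consumed prefix, so nothing is copied and nothing leaks.

```rust
use bytes::{Buf, Bytes, BytesMut};

fn main() {
    // Advancing moves `ptr` forward but leaves `ptr + len` at the end of the
    // underlying buffer, which is exactly what the KIND_VEC branch relies on.
    let mut b = Bytes::from(b"hello world".to_vec());
    b.advance(6);

    // The Vec is reconstructed from its original start and the BytesMut is
    // advanced by the same 6-byte offset.
    let m = b.make_mut();
    assert_eq!(m, BytesMut::from(&b"world"[..]));
}
```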
@@ -1031,6 +1118,10 @@ unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize
     promotable_to_vec(data, ptr, len, |shared| shared.cast())
 }
 
+unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    promotable_to_mut(data, ptr, len, |shared| shared.cast())
+}
+
 unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
     data.with_mut(|shared| {
         let shared = *shared;
@@ -1087,6 +1178,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm
 static SHARED_VTABLE: Vtable = Vtable {
     clone: shared_clone,
     to_vec: shared_to_vec,
+    to_mut: shared_to_mut,
     is_unique: shared_is_unique,
     drop: shared_drop,
 };
@@ -1133,6 +1225,45 @@ unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec
     shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
 }
 
+unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
+    // The goal is to check if the current handle is the only handle
+    // that currently has access to the buffer. This is done by
+    // checking if the `ref_cnt` is currently 1.
+    //
+    // The `Acquire` ordering synchronizes with the `Release` as
+    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+    // operation guarantees that any mutations done in other threads
+    // are ordered before the `ref_cnt` is decremented. As such,
+    // this `Acquire` will guarantee that those mutations are
+    // visible to the current thread.
+    //
+    // Otherwise, we take the other branch, copy the data and call `release_shared`.
+    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
+        // Deallocate the `Shared` instance without running its destructor.
+        let shared = *Box::from_raw(shared);
+        let shared = ManuallyDrop::new(shared);
+        let buf = shared.buf;
+        let cap = shared.cap;
+
+        // Rebuild the Vec that the `Shared` instance owned.
+        let off = offset_from(ptr, buf);
+        let v = Vec::from_raw_parts(buf, len + off, cap);
+
+        let mut b = BytesMut::from_vec(v);
+        b.advance_unchecked(off);
+        b
+    } else {
+        // Copy the data from `Shared` into a new Vec, then release the handle.
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        BytesMut::from_vec(v)
+    }
+}
+
+unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
 pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
     let shared = data.load(Ordering::Acquire);
     let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
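An illustrative sketch of the two branches of `shared_to_mut_impl` as seen through the public API added in this diff: while another handle is alive the uniqueness check fails and the copying branch runs; once only one handle remains, the conversion reuses the buffer.

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    let a = Bytes::from(b"hello world".to_vec());
    let b = a.clone(); // promotes to the shared representation, ref_cnt == 2

    // Not unique: `try_into_mut` hands the handle back unchanged.
    let a = a.try_into_mut().unwrap_err();

    // `make_mut` takes the copying branch and releases `a`'s reference.
    assert_eq!(a.make_mut(), BytesMut::from(&b"hello world"[..]));

    // `b` is now the only handle (ref_cnt == 1), so this conversion reuses
    // the original allocation without copying.
    assert!(b.try_into_mut().is_ok());
}
```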
@@ -1291,6 +1422,23 @@ where
     new_addr as *mut u8
 }
 
+/// Precondition: `dst >= original`
+///
+/// The following line is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But `offset_from` was only stabilized in Rust 1.47, and the minimum
+/// supported Rust version is 1.39, so we cannot use it.
+#[inline]
+fn offset_from(dst: *const u8, original: *const u8) -> usize {
+    debug_assert!(dst >= original);
+
+    dst as usize - original as usize
+}
+
 // compile-fails
 
 /// ```compile_fail