use core::iter::FromIterator;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{offset_from, Buf, BytesMut};

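/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is a (`ptr`, `len`) view into some shared backing storage. How that
/// storage is cloned, converted, and released is dispatched through the
/// `vtable` field, so one `Bytes` type can wrap static slices, vectors, and
/// reference-counted buffers without an enum tag.
///
/// (The doc examples in this file assume the crate-root re-export
/// `bytes::Bytes`.)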
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

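/// Table of operations for one `Bytes` representation. Every entry receives
/// the `data` pointer plus the current (`ptr`, `len`) view, mirroring the
/// fields of `Bytes` itself.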
pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
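    /// Creates a new empty `Bytes`.
    ///
    /// This does not allocate; the returned handle points at a static empty
    /// slice.
    ///
    /// # Examples
    ///
    /// A usage sketch (path assumption noted on the type docs):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```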
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

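    /// Creates a new `Bytes` from a static slice without copying or
    /// allocating; the handle simply points at the `'static` data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```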
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `Bytes` has a length of 0.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns true if this is the only reference to the underlying memory.
    /// The answer comes from the vtable; static slices, for example, always
    /// report false.
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    /// Creates a new `Bytes` containing a copy of the given slice.
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

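    /// Returns a slice of self for the provided range.
    ///
    /// The returned `Bytes` shares the underlying memory with `self` (the
    /// clone below bumps the reference count); no bytes are copied.
    ///
    /// # Panics
    ///
    /// Panics if the range is out of bounds or if `start > end`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    /// assert_eq!(&b[..], b"llo");
    /// ```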
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

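    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// `subset` must be a sub-slice obtained from `self` (its pointer range
    /// must lie inside `self`); the offsets are recovered by pointer
    /// arithmetic and forwarded to `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `subset` is not contained within `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```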
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        // An empty slice may have its pointer anywhere, so explicitly allow
        // it as a subset of any `Bytes`.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

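    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `Bytes` contains elements `[at, len)`. Both handles share the same
    /// underlying memory, so this is O(1) and does not copy.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```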
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

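    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`. Both handles share the same
    /// underlying memory, so this is O(1) and does not copy.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```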
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

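    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest. If `len` is greater than or equal to the current length, this
    /// has no effect.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(&buf[..], b"hello");
    /// ```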
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The "promotable" vtables do not store the capacity anywhere; on
            // drop it is recomputed as offset + len (see free_boxed_slice).
            // Truncating `len` in place would lose that information, so
            // promote to the shared representation via `split_off` instead.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // Avoid an extra allocation if possible.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

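// Converting from Box<[u8]> stores the box pointer itself in `data` and tags
// its least-significant bit as KIND_VEC (versus KIND_ARC once promoted to a
// `Shared` allocation). An even pointer has that bit free, so the tag is OR'd
// in and later masked off; an odd pointer already reads as KIND_VEC, and the
// separate "odd" vtable records that nothing needs to be masked.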
impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<Bytes> for BytesMut {
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static slices
}

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        // KIND_VEC: this handle uniquely owns the buffer. Copy the live bytes
        // back to the start of the allocation and rebuild the Vec, sizing the
        // capacity as offset + len (the same computation free_boxed_slice
        // uses when dropping).
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = offset_from(ptr, buf) + len;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        // KIND_VEC: this handle uniquely owns the buffer, so reclaim the full
        // allocation as a Vec and then advance the BytesMut past the consumed
        // prefix.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

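// The shared, reference-counted state used once a buffer has been promoted
// out of the Vec/Box representation. `buf`/`cap` describe the full original
// allocation, independent of how far any handle has advanced.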
struct Shared {
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Assert that the alignment of `Shared` is divisible by 2. The lowest bit of a
// `Box<Shared>` pointer is then always 0, which lets it double as the KIND_ARC
// tag.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

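// The `data` pointer of a promotable `Bytes` is tagged in its lowest bit:
// 0 (KIND_ARC) means it points at a `Shared` allocation, 1 (KIND_VEC) means
// it still refers to the original Vec/Box buffer.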
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    // Check that the ref_cnt is 1 (unique). If it is, claim the buffer by
    // swapping the count to 0; AcqRel here pairs with the Release decrements
    // other handles perform in release_shared.
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        // Take the `Shared` out of its box, but skip its destructor so the
        // buffer we are about to hand out as a Vec is not freed.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Copy the live bytes back to the start of the allocation.
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        // Not unique: copy the data out and release this handle's reference.
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    // If the ref_cnt is 1, this is the only outstanding handle and the buffer
    // can be reclaimed in place. The Acquire load pairs with the Release
    // decrement performed by handles dropped on other threads.
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        // Take the `Shared` out of its box without running its destructor,
        // which would free the buffer being reclaimed.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Rebuild the full Vec, then advance past the consumed prefix.
        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        // Not unique: copy the data out and release this handle's reference.
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

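// Promotes a KIND_VEC `Bytes` to the shared (KIND_ARC) representation on its
// first clone. The new `Shared` starts with a reference count of 2 (the
// original handle plus the clone). Publication races with other cloning
// threads through the compare_exchange below: the loser frees its freshly
// built `Shared` (without dropping the buffer) and joins the winner's instead.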
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        // Initialize the refcount to 2: one for this reference and one for
        // the new clone that will be returned from `clone()`.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // The promotion succeeded; the underlying memory is now shared.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // Another thread promoted the buffer first. Free our `Shared` box
            // but forget its contents so the buffer it points at is not
            // dropped, then clone from the winning `Shared`.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

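// Drops one reference to a `Shared`. The decrement uses Release so that all
// prior writes through this handle happen-before the deallocation; the final
// owner then performs an Acquire load to observe them before dropping the
// `Shared` (and with it the buffer).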
unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This load synchronizes with the Release decrements from other handles,
    // making their memory accesses visible before the buffer is freed.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}

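// Maps a pointer's address through `f`. Under Miri the new address is applied
// as an offset from the original pointer so its provenance is preserved;
// otherwise the address simply round-trips through `usize`.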
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;

    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}