
use core::iter::FromIterator;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{offset_from, Buf, BytesMut};

/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is an efficient container for storing and operating on contiguous
/// slices of memory. It is intended for use primarily in networking code, but
/// could have applications elsewhere as well.
///
/// `Bytes` values facilitate zero-copy network programming by allowing multiple
/// `Bytes` objects to point to the same underlying memory.
///
/// `Bytes` does not have a single implementation. It is an interface, whose
/// exact behavior is implemented through dynamic dispatch in several underlying
/// implementations of `Bytes`.
///
/// All `Bytes` implementations must fulfill the following requirements:
/// - They are cheaply cloneable and thereby shareable between an unlimited amount
///   of components, for example by modifying a reference count.
/// - Instances can be sliced to refer to a subset of the original buffer.
///
/// ```
/// use bytes::Bytes;
///
/// let mut mem = Bytes::from("Hello world");
/// let a = mem.slice(0..5);
///
/// assert_eq!(a, "Hello");
///
/// let b = mem.split_to(6);
///
/// assert_eq!(mem, "world");
/// assert_eq!(b, "Hello ");
/// ```
///
/// # Memory layout
///
/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
/// to track information about which segment of the underlying memory the
/// `Bytes` handle has access to.
///
/// `Bytes` keeps both a pointer to the shared state containing the full memory
/// slice and a pointer to the start of the region visible by the handle.
/// `Bytes` also tracks the length of its view into the memory.
///
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it behind multiple `Bytes`
/// instances.
///
/// For `Bytes` implementations which refer to constant memory (e.g. created
/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
///
/// For `Bytes` implementations which point to a reference counted shared storage
/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
/// reference count.
///
/// Due to this mechanism, multiple `Bytes` instances may point to the same
/// shared memory region.
/// Each `Bytes` instance can point to different sections within that
/// memory region, and `Bytes` instances may or may not have overlapping views
/// into the memory.
///
/// The following diagram visualizes a scenario where 2 `Bytes` instances make
/// use of an `Arc`-based backing storage, and provide access to different views:
///
/// ```text
///
///    Arc ptrs                   ┌─────────┐
///    ________________________ / │ Bytes 2 │
///   /                           └─────────┘
///  /          ┌───────────┐     |         |
/// |_________/ │  Bytes 1  │     |         |
/// |           └───────────┘     |         |
/// |           |           | ___/ data     | tail
/// |      data |      tail |/              |
/// v           v           v               v
/// ┌─────┬─────┬───────────┬───────────────┬─────┐
/// │ Arc │     │           │               │     │
/// └─────┴─────┴───────────┴───────────────┴─────┘
/// ```
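///
/// For example, with the heap-backed representations, cloning and slicing hand
/// out new views of the same allocation rather than copying the payload:
///
/// ```
/// use bytes::Bytes;
///
/// let a = Bytes::from(b"hello world".to_vec());
/// let b = a.clone();
/// let c = a.slice(6..);
///
/// assert_eq!(c, Bytes::from_static(b"world"));
/// // Both handles borrow the same backing buffer.
/// assert_eq!(a.as_ptr(), b.as_ptr());
/// ```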
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    // inlined "trait object"
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    /// fn(data, ptr, len)
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len)
    ///
    /// takes `Bytes` to value
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    /// fn(data)
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    /// fn(data, ptr, len)
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
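
// In effect, `data` plus `vtable` form a hand-rolled trait object: `data` is
// the type-erased state pointer handed to every vtable function, and `vtable`
// supplies the operations for the concrete representation (static, promotable
// vec/boxed slice, or shared). This keeps `Bytes` itself at four words while
// letting all representations share one public type.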

impl Bytes {
    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        // Make it a named const to work around
        // "unsizing casts are not allowed in const fn"
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new empty `Bytes`.
    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` will point directly to the static slice. There is
    /// no allocating or copying.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Creates a new `Bytes` from a static slice.
    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns true if this is the only reference to the data.
    ///
    /// Always returns false if the data is backed by a static slice.
    ///
    /// The result of this method may be invalidated immediately if another
    /// thread clones this value while this is being called. Ensure you have
    /// unique access to this value (`&mut Bytes`) first if you need to be
    /// certain the result is valid (i.e. for safety reasons).
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(vec![1, 2, 3]);
    /// assert!(a.is_unique());
    /// let b = a.clone();
    /// assert!(!a.is_unique());
    /// ```
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    /// Creates a new `Bytes` instance from a slice, by copying it.
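    ///
    /// Unlike [`Bytes::from_static`], this allocates and copies, so the input
    /// slice does not need to live for `'static`:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let buf = vec![1u8, 2, 3];
    /// let b = Bytes::copy_from_slice(&buf);
    /// drop(buf); // the copy is independent of the original storage
    /// assert_eq!(&b[..], &[1, 2, 3][..]);
    /// ```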
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

    /// Returns a slice of self for the provided range.
    ///
    /// This will increment the reference count for the underlying memory and
    /// return a new `Bytes` handle set to the slice.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
    /// will panic.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// When processing a `Bytes` buffer with other tools, one often gets a
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
    /// called `self.slice()` with the offsets that correspond to `subset`.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(&subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        // Empty slice and empty Bytes may have their pointers reset
        // so explicitly allow empty slice to be a subslice of any slice.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
    /// contains elements `[at, len)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The Vec "promotable" vtables do not store the capacity,
            // so we cannot truncate while using this repr. We *have* to
            // promote using `split_off` so the capacity can be stored.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Try to convert self into `BytesMut`.
    ///
    /// If `self` is unique for the entire original buffer, this will succeed
    /// and return a `BytesMut` with the contents of `self` without copying.
    /// If `self` is not unique for the entire original buffer, this will fail
    /// and return self.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
    /// ```
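    ///
    /// If other handles to the same buffer exist, the conversion fails and the
    /// original `Bytes` is handed back in the `Err` variant:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(b"hello".to_vec());
    /// let b = a.clone();
    /// assert!(a.try_into_mut().is_err());
    /// assert_eq!(&b[..], b"hello");
    /// ```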
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    // private

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        // should already be asserted, but debug assert for tests
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

// Vtable must enforce this behavior
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

// impl Eq

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

// impl From

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // Avoid an extra allocation if possible.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        // Box<[u8]> doesn't contain a heap allocation for empty slices,
        // so the pointer isn't aligned enough for the KIND_VEC stashing to
        // work.
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<Bytes> for BytesMut {
    /// Convert self into `BytesMut`.
    ///
    /// If `bytes` is unique for the entire original buffer, this will return a
    /// `BytesMut` with the contents of `bytes` without copying.
    /// If `bytes` is not unique for the entire original buffer, this will
    /// copy the subset that `bytes` refers to into a new `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
    /// ```
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

// ===== impl Vtable =====

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

// ===== impl StaticVtable =====

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

// ===== impl PromotableVtable =====

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        // If Bytes holds a Vec, then the offset must be 0.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = offset_from(ptr, buf) + len;

        // Copy back buffer
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        // KIND_VEC is a view of an underlying buffer at a certain offset.
        // The ptr + len always represents the end of that buffer.
        // Before truncating it, it is first promoted to KIND_ARC.
        // Thus, we can safely reconstruct a Vec from it without leaking memory.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

// ===== impl SharedVtable =====

struct Shared {
    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the pointer of a
// heap-allocated `Shared` object to implicitly carry the `KIND_ARC` flag:
// that flag is set when the LSB is 0.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
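
// How the tag bit is used: for the promotable vtables, `data` does double
// duty. While the buffer is still an un-promoted boxed slice, `data` stores
// the buffer pointer itself and its low bit reads as `KIND_VEC`; even
// pointers get the bit or'd in (`PROMOTABLE_EVEN_VTABLE`), odd pointers
// already have it set (`PROMOTABLE_ODD_VTABLE`). Once the buffer is promoted,
// `data` holds a `Box<Shared>` pointer whose alignment (asserted above)
// guarantees a low bit of 0, i.e. `KIND_ARC`. Masking with `KIND_MASK`
// recovers which representation is currently in use.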

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    // Check that the ref_cnt is 1 (unique).
    //
    // If it is unique, then it is set to 0 with AcqRel fence for the same
    // reason in release_shared.
    //
    // Otherwise, we take the other branch and call release_shared.
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        // Deallocate the `Shared` instance without running its destructor.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Copy back buffer
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    // The goal is to check if the current handle is the only handle
    // that currently has access to the buffer. This is done by
    // checking if the `ref_cnt` is currently 1.
    //
    // The `Acquire` ordering synchronizes with the `Release` as
    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
    // operation guarantees that any mutations done in other threads
    // are ordered before the `ref_cnt` is decremented. As such,
    // this `Acquire` will guarantee that those mutations are
    // visible to the current thread.
    //
    // Otherwise, we take the other branch, copy the data and call `release_shared`.
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        // Deallocate the `Shared` instance without running its destructor.
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Rebuild Vec
        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        // Copy the data from Shared in a new Vec, then release it
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    // If the buffer is still tracked in a `Vec<u8>`, it is time to
    // promote the vec to an `Arc`. This could potentially be called
    // concurrently, so some care must be taken.

    // First, allocate a new `Shared` instance containing the
    // `Vec` fields. It's important to note that `ptr`, `len`,
    // and `cap` cannot be mutated without having `&mut self`.
    // This means that these fields will not be concurrently
    // updated and since the buffer hasn't been promoted to an
    // `Arc`, those three fields still are the components of the
    // vector.
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        // Initialize refcount to 2. One for this reference, and one
        // for the new clone that will be returned from
        // `shallow_clone`.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    // The pointer should be aligned, so this assert should
    // always succeed.
    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    // Try compare & swapping the pointer into the `arc` field.
    // `Release` is used to synchronize with other threads that
    // will load the `arc` field.
    //
    // If the `compare_exchange` fails, then the thread lost the
    // race to promote the buffer to shared. The `Acquire`
    // ordering will synchronize with the `compare_exchange`
    // that happened in the other thread and the `Shared`
    // pointed to by `actual` will be visible.
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // The upgrade was successful, the new handle can be
            // returned.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // The upgrade failed, a concurrent clone happened. Release
            // the allocation that was made in this thread, it will not
            // be needed.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            // Buffer already promoted to shared storage, so increment ref
            // count.
            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data.  Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    //
    // Thread sanitizer does not support atomic fences. Use an atomic load
    // instead.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}

// Ideally we would always use this version of `ptr_map` since it is strict
// provenance compatible, but it results in worse codegen. We will however still
// use it on miri because it gives better diagnostics for people who test bytes
// code with miri.
//
// See https://github.com/tokio-rs/bytes/pull/545 for more info.
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}
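
// Both versions are used the same way: the closure only edits the tag bit of
// the address, and untagging recovers the original buffer pointer (the miri
// version additionally keeps the allocation's provenance), e.g.
//
//     let tagged = ptr_map(buf, |addr| addr | KIND_VEC);
//     let untagged = ptr_map(tagged, |addr| addr & !KIND_MASK);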

// compile-fails

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}