bytes/bytes_mut.rs
1use core::iter::FromIterator;
2use core::mem::{self, ManuallyDrop, MaybeUninit};
3use core::ops::{Deref, DerefMut};
4use core::ptr::{self, NonNull};
5use core::{cmp, fmt, hash, isize, slice, usize};
6
7use alloc::{
8 borrow::{Borrow, BorrowMut},
9 boxed::Box,
10 string::String,
11 vec,
12 vec::Vec,
13};
14
15use crate::buf::{IntoIter, UninitSlice};
16use crate::bytes::Vtable;
17#[allow(unused)]
18use crate::loom::sync::atomic::AtomicMut;
19use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
20use crate::{offset_from, Buf, BufMut, Bytes};
21
22/// A unique reference to a contiguous slice of memory.
23///
24/// `BytesMut` represents a unique view into a potentially shared memory region.
25/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
26/// mutate the memory.
27///
28/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
29/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
30/// same `buf` overlaps with its slice. That guarantee means that a write lock
31/// is not required.
32///
33/// # Growth
34///
35/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
36/// necessary. However, explicitly reserving the required space up-front before
37/// a series of inserts will be more efficient.
38///
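/// For example (a brief illustrative sketch), reserving once up front avoids
/// repeated reallocations while a series of writes fills the buffer:
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::new();
/// buf.reserve(1024);
/// for _ in 0..1024 {
///     buf.put_u8(0);
/// }
/// assert!(buf.capacity() >= 1024);
/// ```
///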
39/// # Examples
40///
41/// ```
42/// use bytes::{BytesMut, BufMut};
43///
44/// let mut buf = BytesMut::with_capacity(64);
45///
46/// buf.put_u8(b'h');
47/// buf.put_u8(b'e');
48/// buf.put(&b"llo"[..]);
49///
50/// assert_eq!(&buf[..], b"hello");
51///
52/// // Freeze the buffer so that it can be shared
53/// let a = buf.freeze();
54///
55/// // This does not allocate, instead `b` points to the same memory.
56/// let b = a.clone();
57///
58/// assert_eq!(&a[..], b"hello");
59/// assert_eq!(&b[..], b"hello");
60/// ```
61pub struct BytesMut {
62 ptr: NonNull<u8>,
63 len: usize,
64 cap: usize,
65 data: *mut Shared,
66}
67
// Thread-safe reference-counted container for the shared storage. This is mostly
// the same as `std::sync::Arc` but without the weak counter. The ref counting
// fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
// up making the overall code simpler and easier to reason about. This is due to
// some of the logic around setting `Inner::arc` and other ways the `arc` field
// is used. Using `Arc` ended up requiring a number of funky transmutes and
// other shenanigans to make it work.
77struct Shared {
78 vec: Vec<u8>,
79 original_capacity_repr: usize,
80 ref_count: AtomicUsize,
81}
82
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the pointer of an allocated
// `Shared` object implicitly carrying the `KIND_ARC` flag: the flag is set when
// the LSB of the pointer is 0.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
88
89// Buffer storage strategy flags.
90const KIND_ARC: usize = 0b0;
91const KIND_VEC: usize = 0b1;
92const KIND_MASK: usize = 0b1;
93
94// The max original capacity value. Any `Bytes` allocated with a greater initial
95// capacity will default to this.
96const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
97// The original capacity algorithm will not take effect unless the originally
98// allocated capacity was at least 1kb in size.
99const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
100// The original capacity is stored in powers of 2 starting at 1kb to a max of
101// 64kb. Representing it as such requires only 3 bits of storage.
102const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
103const ORIGINAL_CAPACITY_OFFSET: usize = 2;
104
105const VEC_POS_OFFSET: usize = 5;
// When the storage is in the `Vec` representation, the pointer can be advanced
// at most this value. This is because the storage available to track the offset
// is a `usize` minus the low `VEC_POS_OFFSET` bits, which are reserved for the
// KIND flag and the ORIGINAL_CAPACITY representation.
110const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
111const NOT_VEC_POS_MASK: usize = 0b11111;
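
// As a sketch of how these pieces fit together (see `from_vec` and
// `set_vec_pos` below): for a `Vec`-backed handle, the `data` field packs its
// metadata as
//
//   data = (vec_pos << VEC_POS_OFFSET)
//       | (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET)
//       | KIND_VEC;
//
// i.e. the LSB is the KIND flag, bits 2..=4 hold the original-capacity
// representation, and the remaining high bits track how far the pointer has
// been advanced into the vector.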
112
113#[cfg(target_pointer_width = "64")]
114const PTR_WIDTH: usize = 64;
115#[cfg(target_pointer_width = "32")]
116const PTR_WIDTH: usize = 32;
117
118/*
119 *
120 * ===== BytesMut =====
121 *
122 */
123
124impl BytesMut {
125 /// Creates a new `BytesMut` with the specified capacity.
126 ///
127 /// The returned `BytesMut` will be able to hold at least `capacity` bytes
128 /// without reallocating.
129 ///
130 /// It is important to note that this function does not specify the length
131 /// of the returned `BytesMut`, but only the capacity.
132 ///
133 /// # Examples
134 ///
135 /// ```
136 /// use bytes::{BytesMut, BufMut};
137 ///
138 /// let mut bytes = BytesMut::with_capacity(64);
139 ///
140 /// // `bytes` contains no data, even though there is capacity
141 /// assert_eq!(bytes.len(), 0);
142 ///
143 /// bytes.put(&b"hello world"[..]);
144 ///
145 /// assert_eq!(&bytes[..], b"hello world");
146 /// ```
147 #[inline]
148 pub fn with_capacity(capacity: usize) -> BytesMut {
149 BytesMut::from_vec(Vec::with_capacity(capacity))
150 }
151
152 /// Creates a new `BytesMut` with default capacity.
153 ///
154 /// Resulting object has length 0 and unspecified capacity.
155 /// This function does not allocate.
156 ///
157 /// # Examples
158 ///
159 /// ```
160 /// use bytes::{BytesMut, BufMut};
161 ///
162 /// let mut bytes = BytesMut::new();
163 ///
164 /// assert_eq!(0, bytes.len());
165 ///
166 /// bytes.reserve(2);
167 /// bytes.put_slice(b"xy");
168 ///
169 /// assert_eq!(&b"xy"[..], &bytes[..]);
170 /// ```
171 #[inline]
172 pub fn new() -> BytesMut {
173 BytesMut::with_capacity(0)
174 }
175
176 /// Returns the number of bytes contained in this `BytesMut`.
177 ///
178 /// # Examples
179 ///
180 /// ```
181 /// use bytes::BytesMut;
182 ///
183 /// let b = BytesMut::from(&b"hello"[..]);
184 /// assert_eq!(b.len(), 5);
185 /// ```
186 #[inline]
187 pub fn len(&self) -> usize {
188 self.len
189 }
190
191 /// Returns true if the `BytesMut` has a length of 0.
192 ///
193 /// # Examples
194 ///
195 /// ```
196 /// use bytes::BytesMut;
197 ///
198 /// let b = BytesMut::with_capacity(64);
199 /// assert!(b.is_empty());
200 /// ```
201 #[inline]
202 pub fn is_empty(&self) -> bool {
203 self.len == 0
204 }
205
206 /// Returns the number of bytes the `BytesMut` can hold without reallocating.
207 ///
208 /// # Examples
209 ///
210 /// ```
211 /// use bytes::BytesMut;
212 ///
213 /// let b = BytesMut::with_capacity(64);
214 /// assert_eq!(b.capacity(), 64);
215 /// ```
216 #[inline]
217 pub fn capacity(&self) -> usize {
218 self.cap
219 }
220
221 /// Converts `self` into an immutable `Bytes`.
222 ///
223 /// The conversion is zero cost and is used to indicate that the slice
224 /// referenced by the handle will no longer be mutated. Once the conversion
225 /// is done, the handle can be cloned and shared across threads.
226 ///
227 /// # Examples
228 ///
229 /// ```
230 /// use bytes::{BytesMut, BufMut};
231 /// use std::thread;
232 ///
233 /// let mut b = BytesMut::with_capacity(64);
234 /// b.put(&b"hello world"[..]);
235 /// let b1 = b.freeze();
236 /// let b2 = b1.clone();
237 ///
238 /// let th = thread::spawn(move || {
239 /// assert_eq!(&b1[..], b"hello world");
240 /// });
241 ///
242 /// assert_eq!(&b2[..], b"hello world");
243 /// th.join().unwrap();
244 /// ```
245 #[inline]
246 pub fn freeze(self) -> Bytes {
247 let bytes = ManuallyDrop::new(self);
248 if bytes.kind() == KIND_VEC {
249 // Just re-use `Bytes` internal Vec vtable
250 unsafe {
251 let off = bytes.get_vec_pos();
252 let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off);
253 let mut b: Bytes = vec.into();
254 b.advance(off);
255 b
256 }
257 } else {
258 debug_assert_eq!(bytes.kind(), KIND_ARC);
259
260 let ptr = bytes.ptr.as_ptr();
261 let len = bytes.len;
262 let data = AtomicPtr::new(bytes.data.cast());
263 unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
264 }
265 }
266
267 /// Creates a new `BytesMut` containing `len` zeros.
268 ///
269 /// The resulting object has a length of `len` and a capacity greater
270 /// than or equal to `len`. The entire length of the object will be filled
271 /// with zeros.
272 ///
273 /// On some platforms or allocators this function may be faster than
274 /// a manual implementation.
275 ///
276 /// # Examples
277 ///
278 /// ```
279 /// use bytes::BytesMut;
280 ///
281 /// let zeros = BytesMut::zeroed(42);
282 ///
283 /// assert!(zeros.capacity() >= 42);
284 /// assert_eq!(zeros.len(), 42);
285 /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
286 /// ```
287 pub fn zeroed(len: usize) -> BytesMut {
288 BytesMut::from_vec(vec![0; len])
289 }
290
291 /// Splits the bytes into two at the given index.
292 ///
293 /// Afterwards `self` contains elements `[0, at)`, and the returned
294 /// `BytesMut` contains elements `[at, capacity)`.
295 ///
296 /// This is an `O(1)` operation that just increases the reference count
297 /// and sets a few indices.
298 ///
299 /// # Examples
300 ///
301 /// ```
302 /// use bytes::BytesMut;
303 ///
304 /// let mut a = BytesMut::from(&b"hello world"[..]);
305 /// let mut b = a.split_off(5);
306 ///
307 /// a[0] = b'j';
308 /// b[0] = b'!';
309 ///
310 /// assert_eq!(&a[..], b"jello");
311 /// assert_eq!(&b[..], b"!world");
312 /// ```
313 ///
314 /// # Panics
315 ///
316 /// Panics if `at > capacity`.
317 #[must_use = "consider BytesMut::truncate if you don't need the other half"]
318 pub fn split_off(&mut self, at: usize) -> BytesMut {
319 assert!(
320 at <= self.capacity(),
321 "split_off out of bounds: {:?} <= {:?}",
322 at,
323 self.capacity(),
324 );
325 unsafe {
326 let mut other = self.shallow_clone();
327 // SAFETY: We've checked that `at` <= `self.capacity()` above.
328 other.advance_unchecked(at);
329 self.cap = at;
330 self.len = cmp::min(self.len, at);
331 other
332 }
333 }
334
335 /// Removes the bytes from the current view, returning them in a new
336 /// `BytesMut` handle.
337 ///
338 /// Afterwards, `self` will be empty, but will retain any additional
339 /// capacity that it had before the operation. This is identical to
340 /// `self.split_to(self.len())`.
341 ///
342 /// This is an `O(1)` operation that just increases the reference count and
343 /// sets a few indices.
344 ///
345 /// # Examples
346 ///
347 /// ```
348 /// use bytes::{BytesMut, BufMut};
349 ///
350 /// let mut buf = BytesMut::with_capacity(1024);
351 /// buf.put(&b"hello world"[..]);
352 ///
353 /// let other = buf.split();
354 ///
355 /// assert!(buf.is_empty());
356 /// assert_eq!(1013, buf.capacity());
357 ///
358 /// assert_eq!(other, b"hello world"[..]);
359 /// ```
360 #[must_use = "consider BytesMut::clear if you don't need the other half"]
361 pub fn split(&mut self) -> BytesMut {
362 let len = self.len();
363 self.split_to(len)
364 }
365
366 /// Splits the buffer into two at the given index.
367 ///
368 /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
369 /// contains elements `[0, at)`.
370 ///
371 /// This is an `O(1)` operation that just increases the reference count and
372 /// sets a few indices.
373 ///
374 /// # Examples
375 ///
376 /// ```
377 /// use bytes::BytesMut;
378 ///
379 /// let mut a = BytesMut::from(&b"hello world"[..]);
380 /// let mut b = a.split_to(5);
381 ///
382 /// a[0] = b'!';
383 /// b[0] = b'j';
384 ///
385 /// assert_eq!(&a[..], b"!world");
386 /// assert_eq!(&b[..], b"jello");
387 /// ```
388 ///
389 /// # Panics
390 ///
391 /// Panics if `at > len`.
392 #[must_use = "consider BytesMut::advance if you don't need the other half"]
393 pub fn split_to(&mut self, at: usize) -> BytesMut {
394 assert!(
395 at <= self.len(),
396 "split_to out of bounds: {:?} <= {:?}",
397 at,
398 self.len(),
399 );
400
401 unsafe {
402 let mut other = self.shallow_clone();
403 // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <=
404 // `self.capacity()`.
405 self.advance_unchecked(at);
406 other.cap = at;
407 other.len = at;
408 other
409 }
410 }
411
412 /// Shortens the buffer, keeping the first `len` bytes and dropping the
413 /// rest.
414 ///
415 /// If `len` is greater than the buffer's current length, this has no
416 /// effect.
417 ///
418 /// Existing underlying capacity is preserved.
419 ///
420 /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
421 /// excess bytes to be returned instead of dropped.
422 ///
423 /// # Examples
424 ///
425 /// ```
426 /// use bytes::BytesMut;
427 ///
428 /// let mut buf = BytesMut::from(&b"hello world"[..]);
429 /// buf.truncate(5);
430 /// assert_eq!(buf, b"hello"[..]);
431 /// ```
432 pub fn truncate(&mut self, len: usize) {
433 if len <= self.len() {
434 // SAFETY: Shrinking the buffer cannot expose uninitialized bytes.
435 unsafe { self.set_len(len) };
436 }
437 }
438
439 /// Clears the buffer, removing all data. Existing capacity is preserved.
440 ///
441 /// # Examples
442 ///
443 /// ```
444 /// use bytes::BytesMut;
445 ///
446 /// let mut buf = BytesMut::from(&b"hello world"[..]);
447 /// buf.clear();
448 /// assert!(buf.is_empty());
449 /// ```
450 pub fn clear(&mut self) {
451 // SAFETY: Setting the length to zero cannot expose uninitialized bytes.
452 unsafe { self.set_len(0) };
453 }
454
455 /// Resizes the buffer so that `len` is equal to `new_len`.
456 ///
457 /// If `new_len` is greater than `len`, the buffer is extended by the
458 /// difference with each additional byte set to `value`. If `new_len` is
459 /// less than `len`, the buffer is simply truncated.
460 ///
461 /// # Examples
462 ///
463 /// ```
464 /// use bytes::BytesMut;
465 ///
466 /// let mut buf = BytesMut::new();
467 ///
468 /// buf.resize(3, 0x1);
469 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
470 ///
471 /// buf.resize(2, 0x2);
472 /// assert_eq!(&buf[..], &[0x1, 0x1]);
473 ///
474 /// buf.resize(4, 0x3);
475 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
476 /// ```
477 pub fn resize(&mut self, new_len: usize, value: u8) {
478 let additional = if let Some(additional) = new_len.checked_sub(self.len()) {
479 additional
480 } else {
481 self.truncate(new_len);
482 return;
483 };
484
485 if additional == 0 {
486 return;
487 }
488
489 self.reserve(additional);
490 let dst = self.spare_capacity_mut().as_mut_ptr();
491 // SAFETY: `spare_capacity_mut` returns a valid, properly aligned pointer and we've
492 // reserved enough space to write `additional` bytes.
493 unsafe { ptr::write_bytes(dst, value, additional) };
494
495 // SAFETY: There are at least `new_len` initialized bytes in the buffer so no
496 // uninitialized bytes are being exposed.
497 unsafe { self.set_len(new_len) };
498 }
499
500 /// Sets the length of the buffer.
501 ///
502 /// This will explicitly set the size of the buffer without actually
503 /// modifying the data, so it is up to the caller to ensure that the data
504 /// has been initialized.
505 ///
506 /// # Examples
507 ///
508 /// ```
509 /// use bytes::BytesMut;
510 ///
511 /// let mut b = BytesMut::from(&b"hello world"[..]);
512 ///
513 /// unsafe {
514 /// b.set_len(5);
515 /// }
516 ///
517 /// assert_eq!(&b[..], b"hello");
518 ///
519 /// unsafe {
520 /// b.set_len(11);
521 /// }
522 ///
523 /// assert_eq!(&b[..], b"hello world");
524 /// ```
525 #[inline]
526 pub unsafe fn set_len(&mut self, len: usize) {
527 debug_assert!(len <= self.cap, "set_len out of bounds");
528 self.len = len;
529 }
530
531 /// Reserves capacity for at least `additional` more bytes to be inserted
532 /// into the given `BytesMut`.
533 ///
534 /// More than `additional` bytes may be reserved in order to avoid frequent
535 /// reallocations. A call to `reserve` may result in an allocation.
536 ///
537 /// Before allocating new buffer space, the function will attempt to reclaim
538 /// space in the existing buffer. If the current handle references a view
539 /// into a larger original buffer, and all other handles referencing part
540 /// of the same original buffer have been dropped, then the current view
541 /// can be copied/shifted to the front of the buffer and the handle can take
542 /// ownership of the full buffer, provided that the full buffer is large
543 /// enough to fit the requested additional capacity.
544 ///
545 /// This optimization will only happen if shifting the data from the current
546 /// view to the front of the buffer is not too expensive in terms of the
547 /// (amortized) time required. The precise condition is subject to change;
548 /// as of now, the length of the data being shifted needs to be at least as
549 /// large as the distance that it's shifted by. If the current view is empty
550 /// and the original buffer is large enough to fit the requested additional
551 /// capacity, then reallocations will never happen.
552 ///
553 /// # Examples
554 ///
555 /// In the following example, a new buffer is allocated.
556 ///
557 /// ```
558 /// use bytes::BytesMut;
559 ///
560 /// let mut buf = BytesMut::from(&b"hello"[..]);
561 /// buf.reserve(64);
562 /// assert!(buf.capacity() >= 69);
563 /// ```
564 ///
565 /// In the following example, the existing buffer is reclaimed.
566 ///
567 /// ```
568 /// use bytes::{BytesMut, BufMut};
569 ///
570 /// let mut buf = BytesMut::with_capacity(128);
571 /// buf.put(&[0; 64][..]);
572 ///
573 /// let ptr = buf.as_ptr();
574 /// let other = buf.split();
575 ///
576 /// assert!(buf.is_empty());
577 /// assert_eq!(buf.capacity(), 64);
578 ///
579 /// drop(other);
580 /// buf.reserve(128);
581 ///
582 /// assert_eq!(buf.capacity(), 128);
583 /// assert_eq!(buf.as_ptr(), ptr);
584 /// ```
585 ///
586 /// # Panics
587 ///
588 /// Panics if the new capacity overflows `usize`.
589 #[inline]
590 pub fn reserve(&mut self, additional: usize) {
591 let len = self.len();
592 let rem = self.capacity() - len;
593
594 if additional <= rem {
595 // The handle can already store at least `additional` more bytes, so
596 // there is no further work needed to be done.
597 return;
598 }
599
600 // will always succeed
601 let _ = self.reserve_inner(additional, true);
602 }
603
604 // In separate function to allow the short-circuits in `reserve` and `try_reclaim` to
605 // be inline-able. Significantly helps performance. Returns false if it did not succeed.
606 fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool {
607 let len = self.len();
608 let kind = self.kind();
609
610 if kind == KIND_VEC {
611 // If there's enough free space before the start of the buffer, then
612 // just copy the data backwards and reuse the already-allocated
613 // space.
614 //
615 // Otherwise, since backed by a vector, use `Vec::reserve`
616 //
617 // We need to make sure that this optimization does not kill the
618 // amortized runtimes of BytesMut's operations.
619 unsafe {
620 let off = self.get_vec_pos();
621
622 // Only reuse space if we can satisfy the requested additional space.
623 //
624 // Also check if the value of `off` suggests that enough bytes
625 // have been read to account for the overhead of shifting all
626 // the data (in an amortized analysis).
627 // Hence the condition `off >= self.len()`.
628 //
629 // This condition also already implies that the buffer is going
630 // to be (at least) half-empty in the end; so we do not break
631 // the (amortized) runtime with future resizes of the underlying
632 // `Vec`.
633 //
634 // [For more details check issue #524, and PR #525.]
635 if self.capacity() - self.len() + off >= additional && off >= self.len() {
636 // There's enough space, and it's not too much overhead:
637 // reuse the space!
638 //
639 // Just move the pointer back to the start after copying
640 // data back.
641 let base_ptr = self.ptr.as_ptr().sub(off);
642 // Since `off >= self.len()`, the two regions don't overlap.
643 ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
644 self.ptr = vptr(base_ptr);
645 self.set_vec_pos(0);
646
647 // Length stays constant, but since we moved backwards we
648 // can gain capacity back.
649 self.cap += off;
650 } else {
651 if !allocate {
652 return false;
653 }
654 // Not enough space, or reusing might be too much overhead:
655 // allocate more space!
656 let mut v =
657 ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
658 v.reserve(additional);
659
660 // Update the info
661 self.ptr = vptr(v.as_mut_ptr().add(off));
662 self.cap = v.capacity() - off;
663 debug_assert_eq!(self.len, v.len() - off);
664 }
665
666 return true;
667 }
668 }
669
670 debug_assert_eq!(kind, KIND_ARC);
671 let shared: *mut Shared = self.data;
672
673 // Reserving involves abandoning the currently shared buffer and
674 // allocating a new vector with the requested capacity.
675 //
676 // Compute the new capacity
677 let mut new_cap = match len.checked_add(additional) {
678 Some(new_cap) => new_cap,
679 None if !allocate => return false,
680 None => panic!("overflow"),
681 };
682
683 unsafe {
684 // First, try to reclaim the buffer. This is possible if the current
685 // handle is the only outstanding handle pointing to the buffer.
686 if (*shared).is_unique() {
687 // This is the only handle to the buffer. It can be reclaimed.
688 // However, before doing the work of copying data, check to make
689 // sure that the vector has enough capacity.
690 let v = &mut (*shared).vec;
691
692 let v_capacity = v.capacity();
693 let ptr = v.as_mut_ptr();
694
695 let offset = offset_from(self.ptr.as_ptr(), ptr);
696
697 // Compare the condition in the `kind == KIND_VEC` case above
698 // for more details.
699 if v_capacity >= new_cap + offset {
700 self.cap = new_cap;
701 // no copy is necessary
702 } else if v_capacity >= new_cap && offset >= len {
703 // The capacity is sufficient, and copying is not too much
704 // overhead: reclaim the buffer!
705
706 // `offset >= len` means: no overlap
707 ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
708
709 self.ptr = vptr(ptr);
710 self.cap = v.capacity();
711 } else {
712 if !allocate {
713 return false;
714 }
715 // calculate offset
716 let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
717
718 // new_cap is calculated in terms of `BytesMut`, not the underlying
719 // `Vec`, so it does not take the offset into account.
720 //
721 // Thus we have to manually add it here.
722 new_cap = new_cap.checked_add(off).expect("overflow");
723
724 // The vector capacity is not sufficient. The reserve request is
725 // asking for more than the initial buffer capacity. Allocate more
726 // than requested if `new_cap` is not much bigger than the current
727 // capacity.
728 //
                    // There are some situations, using `reserve_exact`, in which
                    // the buffer capacity could be below `original_capacity`, so
                    // do a check.
732 let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
733
734 new_cap = cmp::max(double, new_cap);
735
736 // No space - allocate more
737 //
738 // The length field of `Shared::vec` is not used by the `BytesMut`;
739 // instead we use the `len` field in the `BytesMut` itself. However,
740 // when calling `reserve`, it doesn't guarantee that data stored in
741 // the unused capacity of the vector is copied over to the new
742 // allocation, so we need to ensure that we don't have any data we
743 // care about in the unused capacity before calling `reserve`.
744 debug_assert!(off + len <= v.capacity());
745 v.set_len(off + len);
746 v.reserve(new_cap - v.len());
747
748 // Update the info
749 self.ptr = vptr(v.as_mut_ptr().add(off));
750 self.cap = v.capacity() - off;
751 }
752
753 return true;
754 }
755 }
756 if !allocate {
757 return false;
758 }
759
760 let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
761 let original_capacity = original_capacity_from_repr(original_capacity_repr);
762
763 new_cap = cmp::max(new_cap, original_capacity);
764
765 // Create a new vector to store the data
766 let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
767
768 // Copy the bytes
769 v.extend_from_slice(self.as_ref());
770
771 // Release the shared handle. This must be done *after* the bytes are
772 // copied.
773 unsafe { release_shared(shared) };
774
775 // Update self
776 let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
777 self.data = invalid_ptr(data);
778 self.ptr = vptr(v.as_mut_ptr());
779 self.cap = v.capacity();
780 debug_assert_eq!(self.len, v.len());
781 return true;
782 }
783
784 /// Attempts to cheaply reclaim already allocated capacity for at least `additional` more
785 /// bytes to be inserted into the given `BytesMut` and returns `true` if it succeeded.
786 ///
787 /// `try_reclaim` behaves exactly like `reserve`, except that it never allocates new storage
788 /// and returns a `bool` indicating whether it was successful in doing so:
789 ///
790 /// `try_reclaim` returns false under these conditions:
791 /// - The spare capacity left is less than `additional` bytes AND
792 /// - The existing allocation cannot be reclaimed cheaply or it was less than
793 /// `additional` bytes in size
794 ///
795 /// Reclaiming the allocation cheaply is possible if the `BytesMut` has no outstanding
796 /// references through other `BytesMut`s or `Bytes` which point to the same underlying
797 /// storage.
798 ///
799 /// # Examples
800 ///
801 /// ```
802 /// use bytes::BytesMut;
803 ///
804 /// let mut buf = BytesMut::with_capacity(64);
805 /// assert_eq!(true, buf.try_reclaim(64));
806 /// assert_eq!(64, buf.capacity());
807 ///
808 /// buf.extend_from_slice(b"abcd");
809 /// let mut split = buf.split();
810 /// assert_eq!(60, buf.capacity());
811 /// assert_eq!(4, split.capacity());
812 /// assert_eq!(false, split.try_reclaim(64));
813 /// assert_eq!(false, buf.try_reclaim(64));
814 /// // The split buffer is filled with "abcd"
815 /// assert_eq!(false, split.try_reclaim(4));
816 /// // buf is empty and has capacity for 60 bytes
817 /// assert_eq!(true, buf.try_reclaim(60));
818 ///
819 /// drop(buf);
820 /// assert_eq!(false, split.try_reclaim(64));
821 ///
822 /// split.clear();
823 /// assert_eq!(4, split.capacity());
824 /// assert_eq!(true, split.try_reclaim(64));
825 /// assert_eq!(64, split.capacity());
826 /// ```
827 // I tried splitting out try_reclaim_inner after the short circuits, but it was inlined
828 // regardless with Rust 1.78.0 so probably not worth it
829 #[inline]
830 #[must_use = "consider BytesMut::reserve if you need an infallible reservation"]
831 pub fn try_reclaim(&mut self, additional: usize) -> bool {
832 let len = self.len();
833 let rem = self.capacity() - len;
834
835 if additional <= rem {
836 // The handle can already store at least `additional` more bytes, so
837 // there is no further work needed to be done.
838 return true;
839 }
840
841 self.reserve_inner(additional, false)
842 }
843
844 /// Appends given bytes to this `BytesMut`.
845 ///
846 /// If this `BytesMut` object does not have enough capacity, it is resized
847 /// first.
848 ///
849 /// # Examples
850 ///
851 /// ```
852 /// use bytes::BytesMut;
853 ///
854 /// let mut buf = BytesMut::with_capacity(0);
855 /// buf.extend_from_slice(b"aaabbb");
856 /// buf.extend_from_slice(b"cccddd");
857 ///
858 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
859 /// ```
860 #[inline]
861 pub fn extend_from_slice(&mut self, extend: &[u8]) {
862 let cnt = extend.len();
863 self.reserve(cnt);
864
865 unsafe {
866 let dst = self.spare_capacity_mut();
867 // Reserved above
868 debug_assert!(dst.len() >= cnt);
869
870 ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
871 }
872
873 unsafe {
874 self.advance_mut(cnt);
875 }
876 }
877
878 /// Absorbs a `BytesMut` that was previously split off.
879 ///
880 /// If the two `BytesMut` objects were previously contiguous and not mutated
881 /// in a way that causes re-allocation i.e., if `other` was created by
882 /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
883 /// that just decreases a reference count and sets a few indices.
884 /// Otherwise this method degenerates to
885 /// `self.extend_from_slice(other.as_ref())`.
886 ///
887 /// # Examples
888 ///
889 /// ```
890 /// use bytes::BytesMut;
891 ///
892 /// let mut buf = BytesMut::with_capacity(64);
893 /// buf.extend_from_slice(b"aaabbbcccddd");
894 ///
895 /// let split = buf.split_off(6);
896 /// assert_eq!(b"aaabbb", &buf[..]);
897 /// assert_eq!(b"cccddd", &split[..]);
898 ///
899 /// buf.unsplit(split);
900 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
901 /// ```
902 pub fn unsplit(&mut self, other: BytesMut) {
903 if self.is_empty() {
904 *self = other;
905 return;
906 }
907
908 if let Err(other) = self.try_unsplit(other) {
909 self.extend_from_slice(other.as_ref());
910 }
911 }
912
913 // private
914
915 // For now, use a `Vec` to manage the memory for us, but we may want to
916 // change that in the future to some alternate allocator strategy.
917 //
918 // Thus, we don't expose an easy way to construct from a `Vec` since an
919 // internal change could make a simple pattern (`BytesMut::from(vec)`)
920 // suddenly a lot more expensive.
921 #[inline]
922 pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut {
923 let mut vec = ManuallyDrop::new(vec);
924 let ptr = vptr(vec.as_mut_ptr());
925 let len = vec.len();
926 let cap = vec.capacity();
927
928 let original_capacity_repr = original_capacity_to_repr(cap);
929 let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
930
931 BytesMut {
932 ptr,
933 len,
934 cap,
935 data: invalid_ptr(data),
936 }
937 }
938
939 #[inline]
940 fn as_slice(&self) -> &[u8] {
941 unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
942 }
943
944 #[inline]
945 fn as_slice_mut(&mut self) -> &mut [u8] {
946 unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
947 }
948
949 /// Advance the buffer without bounds checking.
950 ///
951 /// # SAFETY
952 ///
953 /// The caller must ensure that `count` <= `self.cap`.
954 pub(crate) unsafe fn advance_unchecked(&mut self, count: usize) {
955 // Setting the start to 0 is a no-op, so return early if this is the
956 // case.
957 if count == 0 {
958 return;
959 }
960
961 debug_assert!(count <= self.cap, "internal: set_start out of bounds");
962
963 let kind = self.kind();
964
965 if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the "start" of
            // the byte buffer is from the beginning of the vec. We also have to
            // ensure that we don't exceed the maximum shift.
970 let pos = self.get_vec_pos() + count;
971
972 if pos <= MAX_VEC_POS {
973 self.set_vec_pos(pos);
974 } else {
975 // The repr must be upgraded to ARC. This will never happen
976 // on 64 bit systems and will only happen on 32 bit systems
977 // when shifting past 134,217,727 bytes. As such, we don't
978 // worry too much about performance here.
979 self.promote_to_shared(/*ref_count = */ 1);
980 }
981 }
982
        // Updating the start of the view means setting `ptr` to point to the
        // new start and updating the `len` field to reflect the new length
        // of the view.
986 self.ptr = vptr(self.ptr.as_ptr().add(count));
987 self.len = self.len.checked_sub(count).unwrap_or(0);
988 self.cap -= count;
989 }
990
991 fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
992 if other.capacity() == 0 {
993 return Ok(());
994 }
995
996 let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
997 if ptr == other.ptr.as_ptr()
998 && self.kind() == KIND_ARC
999 && other.kind() == KIND_ARC
1000 && self.data == other.data
1001 {
1002 // Contiguous blocks, just combine directly
1003 self.len += other.len;
1004 self.cap += other.cap;
1005 Ok(())
1006 } else {
1007 Err(other)
1008 }
1009 }
1010
1011 #[inline]
1012 fn kind(&self) -> usize {
1013 self.data as usize & KIND_MASK
1014 }
1015
1016 unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
1017 debug_assert_eq!(self.kind(), KIND_VEC);
1018 debug_assert!(ref_cnt == 1 || ref_cnt == 2);
1019
1020 let original_capacity_repr =
1021 (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
1022
        // The vec offset cannot be concurrently mutated, so there
        // should be no danger in reading it.
1025 let off = (self.data as usize) >> VEC_POS_OFFSET;
1026
1027 // First, allocate a new `Shared` instance containing the
1028 // `Vec` fields. It's important to note that `ptr`, `len`,
1029 // and `cap` cannot be mutated without having `&mut self`.
1030 // This means that these fields will not be concurrently
1031 // updated and since the buffer hasn't been promoted to an
1032 // `Arc`, those three fields still are the components of the
1033 // vector.
1034 let shared = Box::new(Shared {
1035 vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
1036 original_capacity_repr,
1037 ref_count: AtomicUsize::new(ref_cnt),
1038 });
1039
1040 let shared = Box::into_raw(shared);
1041
1042 // The pointer should be aligned, so this assert should
1043 // always succeed.
1044 debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
1045
1046 self.data = shared;
1047 }
1048
1049 /// Makes an exact shallow clone of `self`.
1050 ///
1051 /// The kind of `self` doesn't matter, but this is unsafe
    /// because the clone will have the same offsets. You must
    /// make sure that the value returned to the user does not
    /// allow two views into the same range.
1055 #[inline]
1056 unsafe fn shallow_clone(&mut self) -> BytesMut {
1057 if self.kind() == KIND_ARC {
1058 increment_shared(self.data);
1059 ptr::read(self)
1060 } else {
1061 self.promote_to_shared(/*ref_count = */ 2);
1062 ptr::read(self)
1063 }
1064 }
1065
1066 #[inline]
1067 unsafe fn get_vec_pos(&self) -> usize {
1068 debug_assert_eq!(self.kind(), KIND_VEC);
1069
1070 self.data as usize >> VEC_POS_OFFSET
1071 }
1072
1073 #[inline]
1074 unsafe fn set_vec_pos(&mut self, pos: usize) {
1075 debug_assert_eq!(self.kind(), KIND_VEC);
1076 debug_assert!(pos <= MAX_VEC_POS);
1077
1078 self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK));
1079 }
1080
1081 /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
1082 ///
1083 /// The returned slice can be used to fill the buffer with data (e.g. by
1084 /// reading from a file) before marking the data as initialized using the
1085 /// [`set_len`] method.
1086 ///
1087 /// [`set_len`]: BytesMut::set_len
1088 ///
1089 /// # Examples
1090 ///
1091 /// ```
1092 /// use bytes::BytesMut;
1093 ///
1094 /// // Allocate buffer big enough for 10 bytes.
1095 /// let mut buf = BytesMut::with_capacity(10);
1096 ///
1097 /// // Fill in the first 3 elements.
1098 /// let uninit = buf.spare_capacity_mut();
1099 /// uninit[0].write(0);
1100 /// uninit[1].write(1);
1101 /// uninit[2].write(2);
1102 ///
1103 /// // Mark the first 3 bytes of the buffer as being initialized.
1104 /// unsafe {
1105 /// buf.set_len(3);
1106 /// }
1107 ///
1108 /// assert_eq!(&buf[..], &[0, 1, 2]);
1109 /// ```
1110 #[inline]
1111 pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
1112 unsafe {
1113 let ptr = self.ptr.as_ptr().add(self.len);
1114 let len = self.cap - self.len;
1115
1116 slice::from_raw_parts_mut(ptr.cast(), len)
1117 }
1118 }
1119}
1120
1121impl Drop for BytesMut {
1122 fn drop(&mut self) {
1123 let kind = self.kind();
1124
1125 if kind == KIND_VEC {
1126 unsafe {
1127 let off = self.get_vec_pos();
1128
1129 // Vector storage, free the vector
1130 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
1131 }
1132 } else if kind == KIND_ARC {
1133 unsafe { release_shared(self.data) };
1134 }
1135 }
1136}
1137
1138impl Buf for BytesMut {
1139 #[inline]
1140 fn remaining(&self) -> usize {
1141 self.len()
1142 }
1143
1144 #[inline]
1145 fn chunk(&self) -> &[u8] {
1146 self.as_slice()
1147 }
1148
1149 #[inline]
1150 fn advance(&mut self, cnt: usize) {
1151 assert!(
1152 cnt <= self.remaining(),
1153 "cannot advance past `remaining`: {:?} <= {:?}",
1154 cnt,
1155 self.remaining(),
1156 );
1157 unsafe {
1158 // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that
1159 // `self.remaining()` <= `self.cap`.
1160 self.advance_unchecked(cnt);
1161 }
1162 }
1163
1164 fn copy_to_bytes(&mut self, len: usize) -> Bytes {
1165 self.split_to(len).freeze()
1166 }
1167}
1168
1169unsafe impl BufMut for BytesMut {
1170 #[inline]
1171 fn remaining_mut(&self) -> usize {
1172 usize::MAX - self.len()
1173 }
1174
1175 #[inline]
1176 unsafe fn advance_mut(&mut self, cnt: usize) {
1177 let remaining = self.cap - self.len();
1178 if cnt > remaining {
1179 super::panic_advance(cnt, remaining);
1180 }
1181 // Addition won't overflow since it is at most `self.cap`.
1182 self.len = self.len() + cnt;
1183 }
1184
1185 #[inline]
1186 fn chunk_mut(&mut self) -> &mut UninitSlice {
1187 if self.capacity() == self.len() {
1188 self.reserve(64);
1189 }
1190 self.spare_capacity_mut().into()
1191 }
1192
1193 // Specialize these methods so they can skip checking `remaining_mut`
1194 // and `advance_mut`.
1195
1196 fn put<T: Buf>(&mut self, mut src: T)
1197 where
1198 Self: Sized,
1199 {
1200 while src.has_remaining() {
1201 let s = src.chunk();
1202 let l = s.len();
1203 self.extend_from_slice(s);
1204 src.advance(l);
1205 }
1206 }
1207
1208 fn put_slice(&mut self, src: &[u8]) {
1209 self.extend_from_slice(src);
1210 }
1211
1212 fn put_bytes(&mut self, val: u8, cnt: usize) {
1213 self.reserve(cnt);
1214 unsafe {
1215 let dst = self.spare_capacity_mut();
1216 // Reserved above
1217 debug_assert!(dst.len() >= cnt);
1218
1219 ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
1220
1221 self.advance_mut(cnt);
1222 }
1223 }
1224}
1225
1226impl AsRef<[u8]> for BytesMut {
1227 #[inline]
1228 fn as_ref(&self) -> &[u8] {
1229 self.as_slice()
1230 }
1231}
1232
1233impl Deref for BytesMut {
1234 type Target = [u8];
1235
1236 #[inline]
1237 fn deref(&self) -> &[u8] {
1238 self.as_ref()
1239 }
1240}
1241
1242impl AsMut<[u8]> for BytesMut {
1243 #[inline]
1244 fn as_mut(&mut self) -> &mut [u8] {
1245 self.as_slice_mut()
1246 }
1247}
1248
1249impl DerefMut for BytesMut {
1250 #[inline]
1251 fn deref_mut(&mut self) -> &mut [u8] {
1252 self.as_mut()
1253 }
1254}
1255
1256impl<'a> From<&'a [u8]> for BytesMut {
1257 fn from(src: &'a [u8]) -> BytesMut {
1258 BytesMut::from_vec(src.to_vec())
1259 }
1260}
1261
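/// Creates a `BytesMut` by copying the bytes of the string slice; a small
/// illustrative example:
///
/// ```
/// use bytes::BytesMut;
///
/// let buf = BytesMut::from("hello");
/// assert_eq!(buf, b"hello"[..]);
/// ```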
1262impl<'a> From<&'a str> for BytesMut {
1263 fn from(src: &'a str) -> BytesMut {
1264 BytesMut::from(src.as_bytes())
1265 }
1266}
1267
1268impl From<BytesMut> for Bytes {
1269 fn from(src: BytesMut) -> Bytes {
1270 src.freeze()
1271 }
1272}
1273
1274impl PartialEq for BytesMut {
1275 fn eq(&self, other: &BytesMut) -> bool {
1276 self.as_slice() == other.as_slice()
1277 }
1278}
1279
1280impl PartialOrd for BytesMut {
1281 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1282 self.as_slice().partial_cmp(other.as_slice())
1283 }
1284}
1285
1286impl Ord for BytesMut {
1287 fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1288 self.as_slice().cmp(other.as_slice())
1289 }
1290}
1291
1292impl Eq for BytesMut {}
1293
1294impl Default for BytesMut {
1295 #[inline]
1296 fn default() -> BytesMut {
1297 BytesMut::new()
1298 }
1299}
1300
1301impl hash::Hash for BytesMut {
1302 fn hash<H>(&self, state: &mut H)
1303 where
1304 H: hash::Hasher,
1305 {
1306 let s: &[u8] = self.as_ref();
1307 s.hash(state);
1308 }
1309}
1310
1311impl Borrow<[u8]> for BytesMut {
1312 fn borrow(&self) -> &[u8] {
1313 self.as_ref()
1314 }
1315}
1316
1317impl BorrowMut<[u8]> for BytesMut {
1318 fn borrow_mut(&mut self) -> &mut [u8] {
1319 self.as_mut()
1320 }
1321}
1322
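/// `BytesMut` can be used as a sink for formatted text via `fmt::Write`; a small
/// illustrative example:
///
/// ```
/// use bytes::BytesMut;
/// use std::fmt::Write;
///
/// let mut buf = BytesMut::new();
/// write!(buf, "{} {}", "hello", 42).unwrap();
/// assert_eq!(&buf[..], b"hello 42");
/// ```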
1323impl fmt::Write for BytesMut {
1324 #[inline]
1325 fn write_str(&mut self, s: &str) -> fmt::Result {
1326 if self.remaining_mut() >= s.len() {
1327 self.put_slice(s.as_bytes());
1328 Ok(())
1329 } else {
1330 Err(fmt::Error)
1331 }
1332 }
1333
1334 #[inline]
1335 fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1336 fmt::write(self, args)
1337 }
1338}
1339
1340impl Clone for BytesMut {
1341 fn clone(&self) -> BytesMut {
1342 BytesMut::from(&self[..])
1343 }
1344}
1345
1346impl IntoIterator for BytesMut {
1347 type Item = u8;
1348 type IntoIter = IntoIter<BytesMut>;
1349
1350 fn into_iter(self) -> Self::IntoIter {
1351 IntoIter::new(self)
1352 }
1353}
1354
1355impl<'a> IntoIterator for &'a BytesMut {
1356 type Item = &'a u8;
1357 type IntoIter = core::slice::Iter<'a, u8>;
1358
1359 fn into_iter(self) -> Self::IntoIter {
1360 self.as_ref().iter()
1361 }
1362}
1363
1364impl Extend<u8> for BytesMut {
1365 fn extend<T>(&mut self, iter: T)
1366 where
1367 T: IntoIterator<Item = u8>,
1368 {
1369 let iter = iter.into_iter();
1370
1371 let (lower, _) = iter.size_hint();
1372 self.reserve(lower);
1373
1374 // TODO: optimize
1375 // 1. If self.kind() == KIND_VEC, use Vec::extend
1376 for b in iter {
1377 self.put_u8(b);
1378 }
1379 }
1380}
1381
1382impl<'a> Extend<&'a u8> for BytesMut {
1383 fn extend<T>(&mut self, iter: T)
1384 where
1385 T: IntoIterator<Item = &'a u8>,
1386 {
1387 self.extend(iter.into_iter().copied())
1388 }
1389}
1390
1391impl Extend<Bytes> for BytesMut {
1392 fn extend<T>(&mut self, iter: T)
1393 where
1394 T: IntoIterator<Item = Bytes>,
1395 {
1396 for bytes in iter {
1397 self.extend_from_slice(&bytes)
1398 }
1399 }
1400}
1401
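/// Collects an iterator of bytes into a new `BytesMut`; a small illustrative
/// example:
///
/// ```
/// use bytes::BytesMut;
///
/// let buf: BytesMut = (0u8..4).collect();
/// assert_eq!(&buf[..], &[0, 1, 2, 3]);
/// ```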
1402impl FromIterator<u8> for BytesMut {
1403 fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1404 BytesMut::from_vec(Vec::from_iter(into_iter))
1405 }
1406}
1407
1408impl<'a> FromIterator<&'a u8> for BytesMut {
1409 fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1410 BytesMut::from_iter(into_iter.into_iter().copied())
1411 }
1412}
1413
1414/*
1415 *
1416 * ===== Inner =====
1417 *
1418 */
1419
1420unsafe fn increment_shared(ptr: *mut Shared) {
1421 let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
1422
1423 if old_size > isize::MAX as usize {
1424 crate::abort();
1425 }
1426}
1427
1428unsafe fn release_shared(ptr: *mut Shared) {
1429 // `Shared` storage... follow the drop steps from Arc.
1430 if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
1431 return;
1432 }
1433
1434 // This fence is needed to prevent reordering of use of the data and
1435 // deletion of the data. Because it is marked `Release`, the decreasing
1436 // of the reference count synchronizes with this `Acquire` fence. This
1437 // means that use of the data happens before decreasing the reference
1438 // count, which happens before this fence, which happens before the
1439 // deletion of the data.
1440 //
1441 // As explained in the [Boost documentation][1],
1442 //
1443 // > It is important to enforce any possible access to the object in one
1444 // > thread (through an existing reference) to *happen before* deleting
1445 // > the object in a different thread. This is achieved by a "release"
1446 // > operation after dropping a reference (any access to the object
1447 // > through this reference must obviously happened before), and an
1448 // > "acquire" operation before deleting the object.
1449 //
1450 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1451 //
1452 // Thread sanitizer does not support atomic fences. Use an atomic load
1453 // instead.
1454 (*ptr).ref_count.load(Ordering::Acquire);
1455
1456 // Drop the data
1457 drop(Box::from_raw(ptr));
1458}
1459
1460impl Shared {
1461 fn is_unique(&self) -> bool {
1462 // The goal is to check if the current handle is the only handle
1463 // that currently has access to the buffer. This is done by
1464 // checking if the `ref_count` is currently 1.
1465 //
1466 // The `Acquire` ordering synchronizes with the `Release` as
1467 // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1468 // operation guarantees that any mutations done in other threads
1469 // are ordered before the `ref_count` is decremented. As such,
1470 // this `Acquire` will guarantee that those mutations are
1471 // visible to the current thread.
1472 self.ref_count.load(Ordering::Acquire) == 1
1473 }
1474}
1475
1476#[inline]
1477fn original_capacity_to_repr(cap: usize) -> usize {
1478 let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
1479 cmp::min(
1480 width,
1481 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
1482 )
1483}
1484
1485fn original_capacity_from_repr(repr: usize) -> usize {
1486 if repr == 0 {
1487 return 0;
1488 }
1489
1490 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
1491}
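
// For example (a sketch of the round trip): `original_capacity_to_repr(4096)`
// returns 3, and `original_capacity_from_repr(3)` maps back to `1 << 12`, i.e.
// 4096. Capacities below 1 KiB map to repr 0, and capacities of 64 KiB or more
// saturate at repr 7.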
1492
1493#[cfg(test)]
1494mod tests {
1495 use super::*;
1496
1497 #[test]
1498 fn test_original_capacity_to_repr() {
1499 assert_eq!(original_capacity_to_repr(0), 0);
1500
1501 let max_width = 32;
1502
1503 for width in 1..(max_width + 1) {
            let cap = 1 << (width - 1);
1505
1506 let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
1507 0
1508 } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
1509 width - MIN_ORIGINAL_CAPACITY_WIDTH
1510 } else {
1511 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
1512 };
1513
1514 assert_eq!(original_capacity_to_repr(cap), expected);
1515
1516 if width > 1 {
1517 assert_eq!(original_capacity_to_repr(cap + 1), expected);
1518 }
1519
1520 // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
1521 if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
1522 assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
1523 assert_eq!(original_capacity_to_repr(cap + 76), expected);
1524 } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
1525 assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
1526 assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
1527 }
1528 }
1529 }
1530
1531 #[test]
1532 fn test_original_capacity_from_repr() {
1533 assert_eq!(0, original_capacity_from_repr(0));
1534
1535 let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
1536
1537 assert_eq!(min_cap, original_capacity_from_repr(1));
1538 assert_eq!(min_cap * 2, original_capacity_from_repr(2));
1539 assert_eq!(min_cap * 4, original_capacity_from_repr(3));
1540 assert_eq!(min_cap * 8, original_capacity_from_repr(4));
1541 assert_eq!(min_cap * 16, original_capacity_from_repr(5));
1542 assert_eq!(min_cap * 32, original_capacity_from_repr(6));
1543 assert_eq!(min_cap * 64, original_capacity_from_repr(7));
1544 }
1545}
1546
1547unsafe impl Send for BytesMut {}
1548unsafe impl Sync for BytesMut {}
1549
1550/*
1551 *
1552 * ===== PartialEq / PartialOrd =====
1553 *
1554 */
1555
1556impl PartialEq<[u8]> for BytesMut {
1557 fn eq(&self, other: &[u8]) -> bool {
1558 &**self == other
1559 }
1560}
1561
1562impl PartialOrd<[u8]> for BytesMut {
1563 fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
1564 (**self).partial_cmp(other)
1565 }
1566}
1567
1568impl PartialEq<BytesMut> for [u8] {
1569 fn eq(&self, other: &BytesMut) -> bool {
1570 *other == *self
1571 }
1572}
1573
1574impl PartialOrd<BytesMut> for [u8] {
1575 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1576 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1577 }
1578}
1579
1580impl PartialEq<str> for BytesMut {
1581 fn eq(&self, other: &str) -> bool {
1582 &**self == other.as_bytes()
1583 }
1584}
1585
1586impl PartialOrd<str> for BytesMut {
1587 fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
1588 (**self).partial_cmp(other.as_bytes())
1589 }
1590}
1591
1592impl PartialEq<BytesMut> for str {
1593 fn eq(&self, other: &BytesMut) -> bool {
1594 *other == *self
1595 }
1596}
1597
1598impl PartialOrd<BytesMut> for str {
1599 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1600 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1601 }
1602}
1603
1604impl PartialEq<Vec<u8>> for BytesMut {
1605 fn eq(&self, other: &Vec<u8>) -> bool {
1606 *self == other[..]
1607 }
1608}
1609
1610impl PartialOrd<Vec<u8>> for BytesMut {
1611 fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
1612 (**self).partial_cmp(&other[..])
1613 }
1614}
1615
1616impl PartialEq<BytesMut> for Vec<u8> {
1617 fn eq(&self, other: &BytesMut) -> bool {
1618 *other == *self
1619 }
1620}
1621
1622impl PartialOrd<BytesMut> for Vec<u8> {
1623 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1624 other.partial_cmp(self)
1625 }
1626}
1627
1628impl PartialEq<String> for BytesMut {
1629 fn eq(&self, other: &String) -> bool {
1630 *self == other[..]
1631 }
1632}
1633
1634impl PartialOrd<String> for BytesMut {
1635 fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
1636 (**self).partial_cmp(other.as_bytes())
1637 }
1638}
1639
1640impl PartialEq<BytesMut> for String {
1641 fn eq(&self, other: &BytesMut) -> bool {
1642 *other == *self
1643 }
1644}
1645
1646impl PartialOrd<BytesMut> for String {
1647 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1648 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1649 }
1650}
1651
1652impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
1653where
1654 BytesMut: PartialEq<T>,
1655{
1656 fn eq(&self, other: &&'a T) -> bool {
1657 *self == **other
1658 }
1659}
1660
1661impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
1662where
1663 BytesMut: PartialOrd<T>,
1664{
1665 fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
1666 self.partial_cmp(*other)
1667 }
1668}
1669
1670impl PartialEq<BytesMut> for &[u8] {
1671 fn eq(&self, other: &BytesMut) -> bool {
1672 *other == *self
1673 }
1674}
1675
1676impl PartialOrd<BytesMut> for &[u8] {
1677 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1678 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1679 }
1680}
1681
1682impl PartialEq<BytesMut> for &str {
1683 fn eq(&self, other: &BytesMut) -> bool {
1684 *other == *self
1685 }
1686}
1687
1688impl PartialOrd<BytesMut> for &str {
1689 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1690 other.partial_cmp(self)
1691 }
1692}
1693
1694impl PartialEq<BytesMut> for Bytes {
1695 fn eq(&self, other: &BytesMut) -> bool {
1696 other[..] == self[..]
1697 }
1698}
1699
1700impl PartialEq<Bytes> for BytesMut {
1701 fn eq(&self, other: &Bytes) -> bool {
1702 other[..] == self[..]
1703 }
1704}
1705
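/// Converts the `BytesMut` into a `Vec<u8>`, reusing the existing allocation
/// where possible; a small illustrative example:
///
/// ```
/// use bytes::BytesMut;
///
/// let buf = BytesMut::from(&b"hello"[..]);
/// let vec: Vec<u8> = buf.into();
/// assert_eq!(&vec[..], b"hello");
/// ```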
1706impl From<BytesMut> for Vec<u8> {
1707 fn from(bytes: BytesMut) -> Self {
1708 let kind = bytes.kind();
1709 let bytes = ManuallyDrop::new(bytes);
1710
1711 let mut vec = if kind == KIND_VEC {
1712 unsafe {
1713 let off = bytes.get_vec_pos();
1714 rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
1715 }
1716 } else {
1717 let shared = bytes.data as *mut Shared;
1718
1719 if unsafe { (*shared).is_unique() } {
1720 let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
1721
1722 unsafe { release_shared(shared) };
1723
1724 vec
1725 } else {
1726 return ManuallyDrop::into_inner(bytes).deref().to_vec();
1727 }
1728 };
1729
1730 let len = bytes.len;
1731
1732 unsafe {
1733 ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
1734 vec.set_len(len);
1735 }
1736
1737 vec
1738 }
1739}
1740
1741#[inline]
1742fn vptr(ptr: *mut u8) -> NonNull<u8> {
1743 if cfg!(debug_assertions) {
1744 NonNull::new(ptr).expect("Vec pointer should be non-null")
1745 } else {
1746 unsafe { NonNull::new_unchecked(ptr) }
1747 }
1748}
1749
1750/// Returns a dangling pointer with the given address. This is used to store
1751/// integer data in pointer fields.
1752///
/// It is equivalent to `addr as *mut T`, but writing the cast that way fails under
/// Miri when strict provenance checking is enabled.
1755#[inline]
1756fn invalid_ptr<T>(addr: usize) -> *mut T {
1757 let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
1758 debug_assert_eq!(ptr as usize, addr);
1759 ptr.cast::<T>()
1760}
1761
1762unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
1763 let ptr = ptr.sub(off);
1764 len += off;
1765 cap += off;
1766
1767 Vec::from_raw_parts(ptr, len, cap)
1768}
1769
1770// ===== impl SharedVtable =====
1771
1772static SHARED_VTABLE: Vtable = Vtable {
1773 clone: shared_v_clone,
1774 to_vec: shared_v_to_vec,
1775 to_mut: shared_v_to_mut,
1776 is_unique: shared_v_is_unique,
1777 drop: shared_v_drop,
1778};
1779
1780unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1781 let shared = data.load(Ordering::Relaxed) as *mut Shared;
1782 increment_shared(shared);
1783
1784 let data = AtomicPtr::new(shared as *mut ());
1785 Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
1786}
1787
1788unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1789 let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
1790
1791 if (*shared).is_unique() {
1792 let shared = &mut *shared;
1793
1794 // Drop shared
1795 let mut vec = mem::replace(&mut shared.vec, Vec::new());
1796 release_shared(shared);
1797
1798 // Copy back buffer
1799 ptr::copy(ptr, vec.as_mut_ptr(), len);
1800 vec.set_len(len);
1801
1802 vec
1803 } else {
1804 let v = slice::from_raw_parts(ptr, len).to_vec();
1805 release_shared(shared);
1806 v
1807 }
1808}
1809
1810unsafe fn shared_v_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1811 let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
1812
1813 if (*shared).is_unique() {
1814 let shared = &mut *shared;
1815
1816 // The capacity is always the original capacity of the buffer
1817 // minus the offset from the start of the buffer
1818 let v = &mut shared.vec;
1819 let v_capacity = v.capacity();
1820 let v_ptr = v.as_mut_ptr();
1821 let offset = offset_from(ptr as *mut u8, v_ptr);
1822 let cap = v_capacity - offset;
1823
1824 let ptr = vptr(ptr as *mut u8);
1825
1826 BytesMut {
1827 ptr,
1828 len,
1829 cap,
1830 data: shared,
1831 }
1832 } else {
1833 let v = slice::from_raw_parts(ptr, len).to_vec();
1834 release_shared(shared);
1835 BytesMut::from_vec(v)
1836 }
1837}
1838
1839unsafe fn shared_v_is_unique(data: &AtomicPtr<()>) -> bool {
1840 let shared = data.load(Ordering::Acquire);
1841 let ref_count = (*shared.cast::<Shared>()).ref_count.load(Ordering::Relaxed);
1842 ref_count == 1
1843}
1844
1845unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1846 data.with_mut(|shared| {
1847 release_shared(*shared as *mut Shared);
1848 });
1849}
1850
1851// compile-fails
1852
1853/// ```compile_fail
1854/// use bytes::BytesMut;
1855/// #[deny(unused_must_use)]
1856/// {
1857/// let mut b1 = BytesMut::from("hello world");
1858/// b1.split_to(6);
1859/// }
1860/// ```
1861fn _split_to_must_use() {}
1862
1863/// ```compile_fail
1864/// use bytes::BytesMut;
1865/// #[deny(unused_must_use)]
1866/// {
1867/// let mut b1 = BytesMut::from("hello world");
1868/// b1.split_off(6);
1869/// }
1870/// ```
1871fn _split_off_must_use() {}
1872
1873/// ```compile_fail
1874/// use bytes::BytesMut;
1875/// #[deny(unused_must_use)]
1876/// {
1877/// let mut b1 = BytesMut::from("hello world");
1878/// b1.split();
1879/// }
1880/// ```
1881fn _split_must_use() {}
1882
1883// fuzz tests
1884#[cfg(all(test, loom))]
1885mod fuzz {
1886 use loom::sync::Arc;
1887 use loom::thread;
1888
1889 use super::BytesMut;
1890 use crate::Bytes;
1891
1892 #[test]
1893 fn bytes_mut_cloning_frozen() {
1894 loom::model(|| {
1895 let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
1896 let addr = a.as_ptr() as usize;
1897
1898 // test the Bytes::clone is Sync by putting it in an Arc
1899 let a1 = Arc::new(a);
1900 let a2 = a1.clone();
1901
1902 let t1 = thread::spawn(move || {
1903 let b: Bytes = (*a1).clone();
1904 assert_eq!(b.as_ptr() as usize, addr);
1905 });
1906
1907 let t2 = thread::spawn(move || {
1908 let b: Bytes = (*a2).clone();
1909 assert_eq!(b.as_ptr() as usize, addr);
1910 });
1911
1912 t1.join().unwrap();
1913 t2.join().unwrap();
1914 });
1915 }
1916}