bytes/
bytes.rs

1use core::iter::FromIterator;
2use core::mem::{self, ManuallyDrop};
3use core::ops::{Deref, RangeBounds};
4use core::{cmp, fmt, hash, ptr, slice, usize};
5
6use alloc::{
7    alloc::{dealloc, Layout},
8    borrow::Borrow,
9    boxed::Box,
10    string::String,
11    vec::Vec,
12};
13
14use crate::buf::IntoIter;
15#[allow(unused)]
16use crate::loom::sync::atomic::AtomicMut;
17use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
18use crate::{offset_from, Buf, BytesMut};
19
20/// A cheaply cloneable and sliceable chunk of contiguous memory.
21///
22/// `Bytes` is an efficient container for storing and operating on contiguous
23/// slices of memory. It is intended for use primarily in networking code, but
24/// could have applications elsewhere as well.
25///
26/// `Bytes` values facilitate zero-copy network programming by allowing multiple
27/// `Bytes` objects to point to the same underlying memory.
28///
29/// `Bytes` does not have a single implementation. It is an interface, whose
30/// exact behavior is implemented through dynamic dispatch in several underlying
31/// implementations of `Bytes`.
32///
33/// All `Bytes` implementations must fulfill the following requirements:
34/// - They are cheaply cloneable and thereby shareable between an unlimited amount
35///   of components, for example by modifying a reference count.
36/// - Instances can be sliced to refer to a subset of the original buffer.
37///
38/// ```
39/// use bytes::Bytes;
40///
41/// let mut mem = Bytes::from("Hello world");
42/// let a = mem.slice(0..5);
43///
44/// assert_eq!(a, "Hello");
45///
46/// let b = mem.split_to(6);
47///
48/// assert_eq!(mem, "world");
49/// assert_eq!(b, "Hello ");
50/// ```
51///
52/// # Memory layout
53///
54/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
55/// to track information about which segment of the underlying memory the
56/// `Bytes` handle has access to.
57///
58/// `Bytes` keeps both a pointer to the shared state containing the full memory
59/// slice and a pointer to the start of the region visible by the handle.
60/// `Bytes` also tracks the length of its view into the memory.
61///
62/// # Sharing
63///
64/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
65/// how sharing/cloning is implemented in detail.
66/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
67/// cloning the backing storage in order to share it behind multiple `Bytes`
68/// instances.
69///
70/// For `Bytes` implementations which refer to constant memory (e.g. created
71/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
72///
73/// For `Bytes` implementations which point to a reference counted shared storage
74/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
75/// reference count.
76///
77/// Due to this mechanism, multiple `Bytes` instances may point to the same
78/// shared memory region.
79/// Each `Bytes` instance can point to different sections within that
80/// memory region, and `Bytes` instances may or may not have overlapping views
81/// into the memory.
82///
83/// The following diagram visualizes a scenario where 2 `Bytes` instances make
84/// use of an `Arc`-based backing storage, and provide access to different views:
85///
86/// ```text
87///
88///    Arc ptrs                   ┌─────────┐
89///    ________________________ / │ Bytes 2 │
90///   /                           └─────────┘
91///  /          ┌───────────┐     |         |
92/// |_________/ │  Bytes 1  │     |         |
93/// |           └───────────┘     |         |
94/// |           |           | ___/ data     | tail
95/// |      data |      tail |/              |
96/// v           v           v               v
97/// ┌─────┬─────┬───────────┬───────────────┬─────┐
98/// │ Arc │     │           │               │     │
99/// └─────┴─────┴───────────┴───────────────┴─────┘
100/// ```
pub struct Bytes {
    // Start of this handle's view into the shared buffer.
    ptr: *const u8,
    // Length of the view starting at `ptr`.
    len: usize,
    // inlined "trait object": per-implementation state, interpreted by `vtable`
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}
108
pub(crate) struct Vtable {
    /// fn(data, ptr, len)
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len)
    ///
    /// takes `Bytes` to value
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    /// fn(data, ptr, len)
    ///
    /// takes `Bytes` to a `BytesMut` (consumes the handle; see `From<Bytes> for BytesMut`)
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    /// fn(data)
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    /// fn(data, ptr, len)
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
122
impl Bytes {
    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        // Make it a named const to work around
        // "unsizing casts are not allowed in const fn"
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Non-const variant used under loom, whose types are not const-constructible.
    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` will point directly to the static slice. There is
    /// no allocating or copying.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            // Static storage needs no shared state; the vtable ignores `data`.
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Non-const variant used under loom.
    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns true if this is the only reference to the data.
    ///
    /// Always returns false if the data is backed by a static slice.
    ///
    /// The result of this method may be invalidated immediately if another
    /// thread clones this value while this is being called. Ensure you have
    /// unique access to this value (`&mut Bytes`) first if you need to be
    /// certain the result is valid (i.e. for safety reasons)
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(vec![1, 2, 3]);
    /// assert!(a.is_unique());
    /// let b = a.clone();
    /// assert!(!a.is_unique());
    /// ```
    pub fn is_unique(&self) -> bool {
        // SAFETY: `data` matches the vtable it was constructed with.
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    /// Creates `Bytes` instance from slice, by copying it.
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

    /// Returns a slice of self for the provided range.
    ///
    /// This will increment the reference count for the underlying memory and
    /// return a new `Bytes` handle set to the slice.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
    /// will panic.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        // An empty view needs no backing storage, so skip the refcount bump.
        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        // SAFETY: `begin <= end <= len` was asserted above, so the new pointer
        // stays within the cloned handle's buffer.
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// When processing a `Bytes` buffer with other tools, one often gets a
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
    /// called `self.slice()` with the offsets that correspond to `subset`.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(&subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        // Empty slice and empty Bytes may have their pointers reset
        // so explicitly allow empty slice to be a subslice of any slice.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
    /// contains elements `[at, len)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        // Fast paths: neither half needs a refcount bump.
        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        // SAFETY: `at <= self.len()` was asserted above.
        unsafe { ret.inc_start(at) };

        ret
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        // Fast paths: neither half needs a refcount bump.
        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        // SAFETY: `at <= self.len()` was asserted above.
        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The Vec "promotable" vtables do not store the capacity,
            // so we cannot truncate while using this repr. We *have* to
            // promote using `split_off` so the capacity can be stored.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Try to convert self into `BytesMut`.
    ///
    /// If `self` is unique for the entire original buffer, this will succeed
    /// and return a `BytesMut` with the contents of `self` without copying.
    /// If `self` is not unique for the entire original buffer, this will fail
    /// and return self.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
    /// ```
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        // NOTE: between this check and the conversion no other handle can
        // appear, because we own `self` by value here.
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    /// Assembles a `Bytes` from raw parts.
    ///
    /// Callers must supply a `data`/`vtable` pair that is internally
    /// consistent and a `ptr`/`len` view that is valid for that storage.
    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    // private

    #[inline]
    fn as_slice(&self) -> &[u8] {
        // SAFETY: `ptr` is valid for `len` bytes for the lifetime of this
        // handle — the invariant every constructor in this module upholds.
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    /// Advances the view start by `by` bytes. Caller must ensure `by <= len`.
    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        // should already be asserted, but debug assert for tests
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}
564
// Vtable must enforce this behavior: every vtable implementation is required
// to make its shared state safe to access and drop from any thread.
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}
568
impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: `data` matches `vtable`; the vtable's drop releases
        // whatever storage this handle refers to.
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}
575
impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        // SAFETY: `data` matches `vtable`; cloning is delegated so each
        // storage kind can bump its refcount (or no-op for static data).
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}
582
impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        // SAFETY: `cnt <= self.len()` was just asserted.
        unsafe {
            self.inc_start(cnt);
        }
    }

    // Overridden because `Bytes` can hand out an O(1) shared view via
    // `split_to` instead of copying.
    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}
612
// Slice-view conversions and hashing all delegate to `as_slice`, so a
// `Bytes` behaves exactly like the `[u8]` it currently views.

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}
643
impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}
661
662impl FromIterator<u8> for Bytes {
663    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
664        Vec::from_iter(into_iter).into()
665    }
666}
667
// impl Eq
//
// All comparisons are byte-wise over the current view, delegating to the
// corresponding `[u8]` implementations. The reversed-operand impls
// (`[u8] == Bytes`, `str == Bytes`, ...) flip back onto the forward impls.

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

// Blanket forwarding so `Bytes` compares against references to anything it
// already compares against.

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}
827
// impl From

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

// Static slices are wrapped without allocating or copying.

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}
848
impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        // Ownership of the allocation transfers below; `ManuallyDrop`
        // prevents the `Vec` destructor from freeing it.
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // Avoid an extra allocation if possible.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        // len < cap: store the capacity in a refcounted `Shared` so it can be
        // recovered when converting back to a Vec or dropping.
        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}
883
impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        // Box<[u8]> doesn't contain a heap allocation for empty slices,
        // so the pointer isn't aligned enough for the KIND_VEC stashing to
        // work.
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            // Even address: the KIND_VEC tag can be stashed in the low bit
            // of `data` itself, avoiding any extra allocation.
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            // Odd address: no spare bit to tag, so a dedicated vtable
            // remembers that `data` is the untagged buffer pointer.
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}
914
impl From<Bytes> for BytesMut {
    /// Convert self into `BytesMut`.
    ///
    /// If `bytes` is unique for the entire original buffer, this will return a
    /// `BytesMut` with the contents of `bytes` without copying.
    /// If `bytes` is not unique for the entire original buffer, this will make
    /// a copy of `bytes` subset of the original buffer in a new `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
    /// ```
    fn from(bytes: Bytes) -> Self {
        // `ManuallyDrop` suppresses `Bytes::drop`: ownership of the storage
        // is handed to the vtable's `to_mut` instead.
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}
936
937impl From<String> for Bytes {
938    fn from(s: String) -> Bytes {
939        Bytes::from(s.into_bytes())
940    }
941}
942
impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        // `ManuallyDrop` suppresses `Bytes::drop`: ownership of the storage
        // is handed to the vtable's `to_vec` instead.
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}
949
// ===== impl Vtable =====

impl fmt::Debug for Vtable {
    // Function pointers have no useful Debug form, so print their addresses.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}
960
// ===== impl StaticVtable =====
//
// Backing storage is a `&'static [u8]`: nothing is refcounted, cloning is
// free, and conversions must copy.

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    // Caller guarantees ptr/len describe a live 'static slice.
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    // Static data is shared by definition; never report uniqueness.
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}
993
// ===== impl PromotableVtable =====
//
// "Promotable" storage starts life as a plain buffer (KIND_VEC) and is
// promoted to refcounted shared storage (KIND_ARC) on first clone. The
// even/odd split exists because only even buffer addresses can carry the
// KIND tag in their low bit (see `From<Box<[u8]>>`).

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};
1011
unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        // Already promoted: just bump the refcount.
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        // Still a tagged raw buffer: strip the tag and promote to shared.
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}
1024
/// Shared `to_vec` for both promotable vtables; `f` recovers the buffer
/// pointer from the (possibly tagged) `data` word.
unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        // If Bytes holds a Vec, then the offset must be 0.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        // KIND_VEC invariant: `ptr + len` is the end of the allocation, so
        // the view offset plus `len` equals the full capacity.
        let cap = offset_from(ptr, buf) + len;

        // Copy back buffer
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}
1050
/// Shared `to_mut` for both promotable vtables; `f` recovers the buffer
/// pointer from the (possibly tagged) `data` word.
unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        // KIND_VEC is a view of an underlying buffer at a certain offset.
        // The ptr + len always represents the end of that buffer.
        // Before truncating it, it is first promoted to KIND_ARC.
        // Thus, we can safely reconstruct a Vec from it without leaking memory.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        // Skip the bytes before this handle's view; the assert chain above
        // guarantees `off` is within the buffer.
        b.advance_unchecked(off);
        b
    }
}
1079
1080unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1081    promotable_to_vec(data, ptr, len, |shared| {
1082        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1083    })
1084}
1085
1086unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1087    promotable_to_mut(data, ptr, len, |shared| {
1088        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1089    })
1090}
1091
/// `drop` vtable entry for the even-tagged promotable flavor.
///
/// # Safety
///
/// Must only be called once, from `Bytes::drop`, with the handle's own
/// fields. The `&mut` receiver lets `with_mut` read the atomic without
/// synchronization.
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            // Promoted: drop one reference; the last reference frees the
            // buffer via `Shared::drop`.
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            // Still uniquely owned: untag the pointer and free the
            // allocation directly.
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}
1106
1107unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1108    let shared = data.load(Ordering::Acquire);
1109    let kind = shared as usize & KIND_MASK;
1110
1111    if kind == KIND_ARC {
1112        shallow_clone_arc(shared as _, ptr, len)
1113    } else {
1114        debug_assert_eq!(kind, KIND_VEC);
1115        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
1116    }
1117}
1118
1119unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1120    promotable_to_vec(data, ptr, len, |shared| shared.cast())
1121}
1122
1123unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1124    promotable_to_mut(data, ptr, len, |shared| shared.cast())
1125}
1126
/// `drop` vtable entry for the odd-tagged promotable flavor.
///
/// # Safety
///
/// Must only be called once, from `Bytes::drop`, with the handle's own
/// fields. The `&mut` receiver lets `with_mut` read the atomic without
/// synchronization.
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            // Promoted: drop one reference; the last reference frees the
            // buffer via `Shared::drop`.
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            // Odd flavor: the stored pointer is the buffer start, no
            // untagging needed before freeing.
            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}
1141
1142unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
1143    let shared = data.load(Ordering::Acquire);
1144    let kind = shared as usize & KIND_MASK;
1145
1146    if kind == KIND_ARC {
1147        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1148        ref_cnt == 1
1149    } else {
1150        true
1151    }
1152}
1153
/// Frees the allocation backing a still-unpromoted (`KIND_VEC`) handle.
///
/// `buf` is the start of the allocation and `offset` the start of the
/// current view; the allocation size is recomputed as
/// `(offset - buf) + len`, which relies on the view always ending at the
/// end of the underlying buffer (see the KIND_VEC invariant noted in
/// `promotable_to_mut`).
///
/// # Safety
///
/// `buf` must point to a live allocation of exactly that size with
/// alignment 1, and must not be used afterwards.
unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}
1158
1159// ===== impl SharedVtable =====
1160
/// Reference-counted state shared by every `Bytes` handle that points into
/// the same heap allocation (the `KIND_ARC` representation).
struct Shared {
    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
    buf: *mut u8,
    cap: usize,
    // Number of outstanding handles; the allocation is freed when this
    // drops to zero (see `release_shared`).
    ref_cnt: AtomicUsize,
}
1167
impl Drop for Shared {
    fn drop(&mut self) {
        // SAFETY: `buf`/`cap` were recorded from the original allocation,
        // and deallocation here uses the matching size with alignment 1.
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}
1173
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the pointer of an
// allocated `Shared` object implicitly carrying the `KIND_ARC` flag:
// this flag is set exactly when the LSB is 0, which 2-alignment
// guarantees. (The `0 - …` expression underflows at compile time if the
// alignment were odd, failing the build.)
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
1179
// Vtable used by `Bytes` handles once their buffer has been promoted to
// the shared, reference-counted (`KIND_ARC`) representation.
static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};
1187
// Tag values stored in the low bit of the `data` pointer. A real `Shared`
// pointer always has LSB 0 (`Shared` is at least 2-aligned, asserted
// above), so a set LSB marks a still-unpromoted vec-backed handle.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
1191
1192unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1193    let shared = data.load(Ordering::Relaxed);
1194    shallow_clone_arc(shared as _, ptr, len)
1195}
1196
/// Converts a `KIND_ARC` handle into a `Vec<u8>`, reusing the allocation
/// when this is the only outstanding reference.
///
/// # Safety
///
/// `shared` must point to a live `Shared`; the caller transfers ownership
/// of one reference to this function.
unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    // Check that the ref_cnt is 1 (unique).
    //
    // If it is unique, then it is set to 0 with AcqRel fence for the same
    // reason in release_shared.
    //
    // Otherwise, we take the other branch and call release_shared.
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        // Deallocate the `Shared` instance without running its destructor.
        // (`ManuallyDrop` keeps `Shared::drop` from freeing the buffer we
        // are about to hand to the Vec.)
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Copy back buffer
        // (`ptr` may point into the middle of `buf`; `ptr::copy` is the
        // overlap-safe memmove.)
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        // Not unique: copy the visible bytes into a fresh Vec, then drop
        // this handle's reference.
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}
1225
1226unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1227    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1228}
1229
/// Converts a `KIND_ARC` handle into a `BytesMut`, reusing the allocation
/// when this is the only outstanding reference.
///
/// # Safety
///
/// `shared` must point to a live `Shared`; the caller transfers ownership
/// of one reference to this function.
unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    // The goal is to check if the current handle is the only handle
    // that currently has access to the buffer. This is done by
    // checking if the `ref_cnt` is currently 1.
    //
    // The `Acquire` ordering synchronizes with the `Release` as
    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
    // operation guarantees that any mutations done in other threads
    // are ordered before the `ref_cnt` is decremented. As such,
    // this `Acquire` will guarantee that those mutations are
    // visible to the current thread.
    //
    // Otherwise, we take the other branch, copy the data and call `release_shared`.
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        // Deallocate the `Shared` instance without running its destructor.
        // (`ManuallyDrop` keeps `Shared::drop` from freeing the buffer the
        // rebuilt Vec now owns.)
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        // Rebuild Vec
        // The Vec's length covers the offset prefix plus the visible bytes;
        // the prefix is then skipped again via `advance_unchecked`.
        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        // Copy the data from Shared in a new Vec, then release it
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}
1264
1265unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1266    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1267}
1268
1269pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
1270    let shared = data.load(Ordering::Acquire);
1271    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1272    ref_cnt == 1
1273}
1274
/// `drop` vtable entry for the shared (`KIND_ARC`) representation: release
/// this handle's reference. `ptr`/`len` are unused — the `Shared` records
/// everything needed for deallocation.
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}
1280
/// Creates another handle over the same `Shared` by bumping its refcount.
///
/// # Safety
///
/// `shared` must point to a live `Shared` on which the caller owns at
/// least one reference.
unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    // Guard against refcount overflow: abort rather than risk the count
    // wrapping to zero and causing a use-after-free.
    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}
1295
#[cold]
/// Clones a still vec-backed (`KIND_VEC`) handle by first promoting the
/// buffer to the shared (`KIND_ARC`) representation.
///
/// # Safety
///
/// `atom`/`ptr` must be the handle's `data` word and its current (tagged)
/// value; `buf` is the untagged allocation start, `offset`/`len` the
/// current view.
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    // The buffer is still tracked in a `Vec<u8>`. It is time to
    // promote the vec to an `Arc`. This could potentially be called
    // concurrently, so some care must be taken.

    // First, allocate a new `Shared` instance containing the
    // `Vec` fields. It's important to note that `ptr`, `len`,
    // and `cap` cannot be mutated without having `&mut self`.
    // This means that these fields will not be concurrently
    // updated and since the buffer hasn't been promoted to an
    // `Arc`, those three fields still are the components of the
    // vector.
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        // Initialize refcount to 2. One for this reference, and one
        // for the new clone that will be returned from
        // `shallow_clone`.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    // The pointer should be aligned, so this assert should
    // always succeed.
    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    // Try compare & swapping the pointer into the `arc` field.
    // `Release` is used synchronize with other threads that
    // will load the `arc` field.
    //
    // If the `compare_exchange` fails, then the thread lost the
    // race to promote the buffer to shared. The `Acquire`
    // ordering will synchronize with the `compare_exchange`
    // that happened in the other thread and the `Shared`
    // pointed to by `actual` will be visible.
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // The upgrade was successful, the new handle can be
            // returned.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // The upgrade failed, a concurrent clone happened. Release
            // the allocation that was made in this thread, it will not
            // be needed.
            // (Moving out of the Box frees the Box's own allocation;
            // `mem::forget` skips `Shared::drop` so the shared buffer,
            // now owned by the winning thread's `Shared`, is not freed.)
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            // Buffer already promoted to shared storage, so increment ref
            // count.
            shallow_clone_arc(actual as _, offset, len)
        }
    }
}
1367
/// Drops one reference to `ptr`, freeing the `Shared` (and, via
/// `Shared::drop`, its buffer) when this was the last reference.
///
/// # Safety
///
/// The caller transfers ownership of one reference; `ptr` must not be
/// used afterwards through that reference.
unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data.  Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    //
    // Thread sanitizer does not support atomic fences. Use an atomic load
    // instead.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}
1399
1400// Ideally we would always use this version of `ptr_map` since it is strict
1401// provenance compatible, but it results in worse codegen. We will however still
1402// use it on miri because it gives better diagnostics for people who test bytes
1403// code with miri.
1404//
1405// See https://github.com/tokio-rs/bytes/pull/545 for more info.
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    // Derive the result from `ptr` itself via a wrapping offset (rather
    // than casting the new address back to a pointer) so the original
    // provenance is preserved — this is what makes the function
    // strict-provenance compatible under miri.
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}
1416
#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    // Round-trip the pointer through its address: apply `f` to the address
    // and cast straight back. Not strict-provenance clean (the miri-only
    // variant is), but yields better codegen.
    f(ptr as usize) as *mut u8
}
1426
1427// compile-fails
1428
// Compile-fail doctest: ensures `split_to`'s return value is marked
// #[must_use], so silently discarding the split-off bytes is a lint error.
/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}
1438
// Compile-fail doctest: ensures `split_off`'s return value is marked
// #[must_use], so silently discarding the split-off bytes is a lint error.
/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}
1448
1449// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        // Model-check (via loom) two threads concurrently cloning the same
        // vec-backed `Bytes`, exercising the racy vec -> Arc promotion.
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                // Cloning must not move or copy the underlying buffer.
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}