1use core::iter::FromIterator;
2use core::mem::{self, ManuallyDrop};
3use core::ops::{Deref, RangeBounds};
4use core::{cmp, fmt, hash, ptr, slice, usize};
5
6use alloc::{
7 alloc::{dealloc, Layout},
8 borrow::Borrow,
9 boxed::Box,
10 string::String,
11 vec::Vec,
12};
13
14use crate::buf::IntoIter;
15#[allow(unused)]
16use crate::loom::sync::atomic::AtomicMut;
17use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
18use crate::{offset_from, Buf, BytesMut};
19
/// A cheaply cloneable, contiguous, immutable region of memory.
///
/// `(ptr, len)` describe the currently visible view; `data` together with
/// `vtable` describe how the backing storage is owned (static slice,
/// promotable vec buffer, or ref-counted `Shared`).
pub struct Bytes {
    // Start of the visible region (not necessarily the allocation start).
    ptr: *const u8,
    // Length of the visible region in bytes.
    len: usize,
    // Representation-specific word interpreted by `vtable`: e.g. a tagged
    // buffer pointer for the promotable kinds, or a `*mut Shared`.
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}
108
/// Manual vtable dispatching storage management for the different `Bytes`
/// representations. Every entry receives the `data` word plus the current
/// `(ptr, len)` view.
pub(crate) struct Vtable {
    /// fn(data, ptr, len) — produce another handle to the same region.
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len) — consume the handle, yielding an owned `Vec<u8>`.
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    /// fn(data, ptr, len) — consume the handle, yielding a `BytesMut`.
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    /// fn(data) — is this the only handle to the underlying storage?
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    /// fn(data, ptr, len) — release this handle's ownership.
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
122
impl Bytes {
    /// Creates a new empty `Bytes` pointing at a static empty slice.
    ///
    /// No allocation is performed; clone/drop are no-ops via `STATIC_VTABLE`.
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        // Named const so the empty slice has a 'static lifetime inside this
        // `const fn`.
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Loom-test variant of [`Bytes::new`]; identical but non-`const`
    /// (loom's atomics are not `const`-constructible).
    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates `Bytes` from a `'static` byte slice without allocating.
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            // The static vtable never reads `data`, so null is fine here.
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Loom-test variant of [`Bytes::from_static`] (loom atomics are not `const`).
    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if this `Bytes` has a length of zero.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns `true` if this is the only handle to the underlying storage.
    ///
    /// Static-backed `Bytes` always report `false` (see `static_is_unique`).
    /// The answer is advisory when other handles exist on other threads.
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

    /// Creates `Bytes` holding a freshly allocated copy of `data`.
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

    /// Returns a `Bytes` viewing the given sub-`range` of `self`.
    ///
    /// O(1): the returned handle shares storage with `self` (via `clone`),
    /// only the `(ptr, len)` view is adjusted.
    ///
    /// # Panics
    ///
    /// Panics if the range is out of bounds or `start > end`.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        // Empty result never needs to share storage.
        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        // SAFETY: begin < end <= len was asserted above, so the new pointer
        // stays inside the view.
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

    /// Returns a `Bytes` corresponding to `subset`, which must be a slice
    /// that points into `self`'s memory (e.g. obtained from `&self[..]`).
    ///
    /// # Panics
    ///
    /// Panics if `subset` is not fully contained within `self`'s buffer.
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        // An empty slice may point anywhere (even outside the buffer), so
        // short-circuit before the containment checks.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits this `Bytes` at `at`: `self` keeps `[0, at)`, the returned
    /// `Bytes` contains `[at, len)`. O(1), storage is shared.
    ///
    /// # Panics
    ///
    /// Panics if `at > self.len()`.
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        // Fast paths avoid the clone (and a possible promotion) entirely.
        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        // SAFETY: at < self.len() here, checked above.
        unsafe { ret.inc_start(at) };

        ret
    }

    /// Splits this `Bytes` at `at`: the returned `Bytes` contains `[0, at)`,
    /// `self` keeps `[at, len)`. O(1), storage is shared.
    ///
    /// # Panics
    ///
    /// Panics if `at > self.len()`.
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        // Fast paths avoid the clone (and a possible promotion) entirely.
        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        // SAFETY: at < self.len() here, checked above.
        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

    /// Shortens the view to `len` bytes, keeping the front; no-op when
    /// `len >= self.len`.
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The promotable (vec-backed) representations recompute the
            // allocation capacity as `offset + len` when dropping or
            // reclaiming the buffer (see `free_boxed_slice`), so merely
            // shrinking `len` would make that arithmetic wrong. `split_off`
            // goes through `clone`, which promotes to the ref-counted
            // representation that stores the real capacity.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    /// Clears the view, setting its length to zero.
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Attempts to convert into `BytesMut` without copying.
    ///
    /// Succeeds only when this is the sole handle to the storage; otherwise
    /// returns `self` unchanged in the `Err` variant.
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    /// Builds a `Bytes` from raw parts.
    ///
    /// # Safety
    ///
    /// `ptr`/`len` must describe a region kept valid by `data`, and `vtable`
    /// must correctly manage that `data` for clone/to_vec/to_mut/drop.
    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    /// Views the bytes as a slice.
    #[inline]
    fn as_slice(&self) -> &[u8] {
        // SAFETY: every representation keeps `ptr`/`len` valid for the
        // lifetime of the handle.
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    /// Advances the start of the view by `by` bytes. The underlying storage
    /// bookkeeping is unchanged (drop paths recover the original buffer).
    ///
    /// # Safety
    ///
    /// Caller must ensure `by <= self.len`.
    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        // Callers check this; keep a debug guard for internal misuse.
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}
564
// SAFETY: `Bytes` is an immutable view; all cross-handle mutation of the
// shared state (`data` word, `Shared::ref_cnt`) goes through atomics.
unsafe impl Send for Bytes {}
// SAFETY: as above — the byte region itself is never written through `ptr`.
unsafe impl Sync for Bytes {}
568
impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        // Dispatch to the representation's release routine (no-op for
        // static, buffer free or refcount decrement otherwise).
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}
575
impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        // Cheap: bumps a refcount, promotes a vec buffer to the shared
        // representation, or re-wraps static storage — never copies bytes.
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}
582
impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        // `Bytes` is contiguous, so the entire remaining buffer is one chunk.
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        // SAFETY: `cnt <= len` was just asserted.
        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        // O(1) — shares storage instead of copying. Panics (from `split_to`)
        // if `len > self.len()`.
        self.split_to(len)
    }
}
612
613impl Deref for Bytes {
614 type Target = [u8];
615
616 #[inline]
617 fn deref(&self) -> &[u8] {
618 self.as_slice()
619 }
620}
621
622impl AsRef<[u8]> for Bytes {
623 #[inline]
624 fn as_ref(&self) -> &[u8] {
625 self.as_slice()
626 }
627}
628
629impl hash::Hash for Bytes {
630 fn hash<H>(&self, state: &mut H)
631 where
632 H: hash::Hasher,
633 {
634 self.as_slice().hash(state);
635 }
636}
637
638impl Borrow<[u8]> for Bytes {
639 fn borrow(&self) -> &[u8] {
640 self.as_slice()
641 }
642}
643
impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    /// Consumes the `Bytes`, yielding its bytes one at a time.
    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}
652
653impl<'a> IntoIterator for &'a Bytes {
654 type Item = &'a u8;
655 type IntoIter = core::slice::Iter<'a, u8>;
656
657 fn into_iter(self) -> Self::IntoIter {
658 self.as_slice().iter()
659 }
660}
661
662impl FromIterator<u8> for Bytes {
663 fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
664 Vec::from_iter(into_iter).into()
665 }
666}
667
668impl PartialEq for Bytes {
671 fn eq(&self, other: &Bytes) -> bool {
672 self.as_slice() == other.as_slice()
673 }
674}
675
676impl PartialOrd for Bytes {
677 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
678 self.as_slice().partial_cmp(other.as_slice())
679 }
680}
681
682impl Ord for Bytes {
683 fn cmp(&self, other: &Bytes) -> cmp::Ordering {
684 self.as_slice().cmp(other.as_slice())
685 }
686}
687
688impl Eq for Bytes {}
689
690impl PartialEq<[u8]> for Bytes {
691 fn eq(&self, other: &[u8]) -> bool {
692 self.as_slice() == other
693 }
694}
695
696impl PartialOrd<[u8]> for Bytes {
697 fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
698 self.as_slice().partial_cmp(other)
699 }
700}
701
702impl PartialEq<Bytes> for [u8] {
703 fn eq(&self, other: &Bytes) -> bool {
704 *other == *self
705 }
706}
707
708impl PartialOrd<Bytes> for [u8] {
709 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
710 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
711 }
712}
713
714impl PartialEq<str> for Bytes {
715 fn eq(&self, other: &str) -> bool {
716 self.as_slice() == other.as_bytes()
717 }
718}
719
720impl PartialOrd<str> for Bytes {
721 fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
722 self.as_slice().partial_cmp(other.as_bytes())
723 }
724}
725
726impl PartialEq<Bytes> for str {
727 fn eq(&self, other: &Bytes) -> bool {
728 *other == *self
729 }
730}
731
732impl PartialOrd<Bytes> for str {
733 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
734 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
735 }
736}
737
738impl PartialEq<Vec<u8>> for Bytes {
739 fn eq(&self, other: &Vec<u8>) -> bool {
740 *self == other[..]
741 }
742}
743
744impl PartialOrd<Vec<u8>> for Bytes {
745 fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
746 self.as_slice().partial_cmp(&other[..])
747 }
748}
749
750impl PartialEq<Bytes> for Vec<u8> {
751 fn eq(&self, other: &Bytes) -> bool {
752 *other == *self
753 }
754}
755
756impl PartialOrd<Bytes> for Vec<u8> {
757 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
758 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
759 }
760}
761
762impl PartialEq<String> for Bytes {
763 fn eq(&self, other: &String) -> bool {
764 *self == other[..]
765 }
766}
767
768impl PartialOrd<String> for Bytes {
769 fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
770 self.as_slice().partial_cmp(other.as_bytes())
771 }
772}
773
774impl PartialEq<Bytes> for String {
775 fn eq(&self, other: &Bytes) -> bool {
776 *other == *self
777 }
778}
779
780impl PartialOrd<Bytes> for String {
781 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
782 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
783 }
784}
785
786impl PartialEq<Bytes> for &[u8] {
787 fn eq(&self, other: &Bytes) -> bool {
788 *other == *self
789 }
790}
791
792impl PartialOrd<Bytes> for &[u8] {
793 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
794 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
795 }
796}
797
798impl PartialEq<Bytes> for &str {
799 fn eq(&self, other: &Bytes) -> bool {
800 *other == *self
801 }
802}
803
804impl PartialOrd<Bytes> for &str {
805 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
806 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
807 }
808}
809
810impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
811where
812 Bytes: PartialEq<T>,
813{
814 fn eq(&self, other: &&'a T) -> bool {
815 *self == **other
816 }
817}
818
819impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
820where
821 Bytes: PartialOrd<T>,
822{
823 fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
824 self.partial_cmp(&**other)
825 }
826}
827
828impl Default for Bytes {
831 #[inline]
832 fn default() -> Bytes {
833 Bytes::new()
834 }
835}
836
837impl From<&'static [u8]> for Bytes {
838 fn from(slice: &'static [u8]) -> Bytes {
839 Bytes::from_static(slice)
840 }
841}
842
843impl From<&'static str> for Bytes {
844 fn from(slice: &'static str) -> Bytes {
845 Bytes::from_static(slice.as_bytes())
846 }
847}
848
impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        // ManuallyDrop: ownership of the allocation moves into the Bytes we
        // build; the Vec must not free it.
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        // A full vec (len == cap) round-trips exactly through a boxed slice,
        // enabling the cheaper promotable representation. The promotable
        // vtables recompute capacity as `offset + len`, so they are only
        // valid when there is no excess capacity.
        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        // Otherwise record the real capacity up front in a ref-counted
        // `Shared`.
        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        // Elsewhere the low bit of `data` is used as a kind tag; a
        // `Box<Shared>` pointer must leave it clear (enforced by the
        // alignment assertion on `Shared`).
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}
883
impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        // An empty boxed slice owns no allocation and its pointer carries no
        // useful tag bit; use the static empty representation instead.
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            // Even address: bit 0 is free, so store the pointer with the
            // KIND_VEC tag set in that bit.
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            // Odd address: bit 0 is already 1, which *is* the KIND_VEC tag,
            // so the pointer is stored untagged and a dedicated vtable
            // remembers not to mask it off.
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}
914
impl From<Bytes> for BytesMut {
    /// Converts into `BytesMut`, reusing the allocation when the vtable
    /// determines this handle is the sole owner and copying otherwise.
    fn from(bytes: Bytes) -> Self {
        // ManuallyDrop: `to_mut` consumes ownership of `data`; running
        // `Bytes::drop` afterwards would release it a second time.
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}
936
937impl From<String> for Bytes {
938 fn from(s: String) -> Bytes {
939 Bytes::from(s.into_bytes())
940 }
941}
942
impl From<Bytes> for Vec<u8> {
    /// Converts into a `Vec<u8>`, reusing the allocation when the vtable
    /// determines this handle is the sole owner and copying otherwise.
    fn from(bytes: Bytes) -> Vec<u8> {
        // ManuallyDrop: `to_vec` consumes ownership of `data`; running
        // `Bytes::drop` afterwards would release it a second time.
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}
949
950impl fmt::Debug for Vtable {
953 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
954 f.debug_struct("Vtable")
955 .field("clone", &(self.clone as *const ()))
956 .field("drop", &(self.drop as *const ()))
957 .finish()
958 }
959}
960
/// Vtable for `Bytes` backed by a `&'static [u8]`: nothing is owned, so
/// clone and drop manage no state.
const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};
970
/// `clone` for static-backed `Bytes`: re-wrap the same `'static` region.
unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

/// `to_vec` for static-backed `Bytes`: the storage is borrowed, so copy.
unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

/// `to_mut` for static-backed `Bytes`: copies into a fresh `BytesMut`.
unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

/// Static storage is never exclusively owned, so it is never "unique".
fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

/// `drop` for static-backed `Bytes`: nothing to release.
unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {}
993
/// Vtable for vec-backed `Bytes` whose buffer address has bit 0 clear
/// ("even"): `data` holds the buffer pointer tagged with KIND_VEC until the
/// first clone promotes it to a `Shared`.
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

/// Vtable for vec-backed `Bytes` whose buffer address has bit 0 set ("odd"):
/// the address itself doubles as the KIND_VEC tag, so `data` is untagged.
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};
1011
1012unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1013 let shared = data.load(Ordering::Acquire);
1014 let kind = shared as usize & KIND_MASK;
1015
1016 if kind == KIND_ARC {
1017 shallow_clone_arc(shared.cast(), ptr, len)
1018 } else {
1019 debug_assert_eq!(kind, KIND_VEC);
1020 let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1021 shallow_clone_vec(data, shared, buf, ptr, len)
1022 }
1023}
1024
/// Shared `to_vec` implementation for both promotable representations;
/// `f` recovers the untagged buffer pointer from the `data` word.
unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        // Still KIND_VEC: this handle exclusively owns the buffer and may
        // reclaim it directly.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        // The original capacity is recovered from how far the view has
        // advanced into the buffer plus the remaining length.
        let cap = offset_from(ptr, buf) + len;

        // Move the visible bytes back to the start (regions may overlap,
        // hence `ptr::copy` rather than `copy_nonoverlapping`).
        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}
1050
/// Shared `to_mut` implementation for both promotable representations;
/// `f` recovers the untagged buffer pointer from the `data` word.
unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        // Still KIND_VEC: exclusively owned. Rebuild the full vec (len set
        // to cap so no bytes are dropped), then advance to the view start
        // instead of memmoving the data down.
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}
1079
1080unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1081 promotable_to_vec(data, ptr, len, |shared| {
1082 ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1083 })
1084}
1085
1086unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1087 promotable_to_mut(data, ptr, len, |shared| {
1088 ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1089 })
1090}
1091
1092unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1093 data.with_mut(|shared| {
1094 let shared = *shared;
1095 let kind = shared as usize & KIND_MASK;
1096
1097 if kind == KIND_ARC {
1098 release_shared(shared.cast());
1099 } else {
1100 debug_assert_eq!(kind, KIND_VEC);
1101 let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1102 free_boxed_slice(buf, ptr, len);
1103 }
1104 });
1105}
1106
1107unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1108 let shared = data.load(Ordering::Acquire);
1109 let kind = shared as usize & KIND_MASK;
1110
1111 if kind == KIND_ARC {
1112 shallow_clone_arc(shared as _, ptr, len)
1113 } else {
1114 debug_assert_eq!(kind, KIND_VEC);
1115 shallow_clone_vec(data, shared, shared.cast(), ptr, len)
1116 }
1117}
1118
1119unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1120 promotable_to_vec(data, ptr, len, |shared| shared.cast())
1121}
1122
1123unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1124 promotable_to_mut(data, ptr, len, |shared| shared.cast())
1125}
1126
1127unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1128 data.with_mut(|shared| {
1129 let shared = *shared;
1130 let kind = shared as usize & KIND_MASK;
1131
1132 if kind == KIND_ARC {
1133 release_shared(shared.cast());
1134 } else {
1135 debug_assert_eq!(kind, KIND_VEC);
1136
1137 free_boxed_slice(shared.cast(), ptr, len);
1138 }
1139 });
1140}
1141
/// `is_unique` for both promotable representations.
unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        // Promoted: unique only while the refcount is exactly 1.
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        // Un-promoted vec buffers have exactly one handle by construction
        // (the first clone promotes).
        true
    }
}
1153
1154unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
1155 let cap = offset_from(offset, buf) + len;
1156 dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
1157}
1158
/// Ref-counted ownership record for a heap buffer — the promoted /
/// `SHARED_VTABLE` representation.
struct Shared {
    // Start of the backing allocation (the view start lives in `Bytes::ptr`).
    buf: *mut u8,
    // Size of the allocation in bytes.
    cap: usize,
    // Number of `Bytes` handles referring to this buffer.
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        // SAFETY: `buf`/`cap` describe an allocation with alignment 1
        // (a vec / boxed slice of u8), matching the layout used to free it.
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}
1173
// `Shared` must be at least 2-byte aligned: the low bit of the `data` word
// is used as the KIND_* tag, so a `Box<Shared>` pointer must have bit 0
// clear. This array length underflows (compile error) if that ever breaks.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

/// Vtable for the ref-counted (`Shared`-backed) representation.
static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};
1187
// Tag stored in the low bit of the `data` word for the promotable vtables:
// 0 = promoted to a ref-counted `Shared`, 1 = still the original vec buffer.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
1191
1192unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1193 let shared = data.load(Ordering::Relaxed);
1194 shallow_clone_arc(shared as _, ptr, len)
1195}
1196
1197unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
1198 if (*shared)
1205 .ref_cnt
1206 .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
1207 .is_ok()
1208 {
1209 let shared = *Box::from_raw(shared);
1211 let shared = ManuallyDrop::new(shared);
1212 let buf = shared.buf;
1213 let cap = shared.cap;
1214
1215 ptr::copy(ptr, buf, len);
1217
1218 Vec::from_raw_parts(buf, len, cap)
1219 } else {
1220 let v = slice::from_raw_parts(ptr, len).to_vec();
1221 release_shared(shared);
1222 v
1223 }
1224}
1225
1226unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1227 shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1228}
1229
1230unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
1231 if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
1244 let shared = *Box::from_raw(shared);
1246 let shared = ManuallyDrop::new(shared);
1247 let buf = shared.buf;
1248 let cap = shared.cap;
1249
1250 let off = offset_from(ptr, buf);
1252 let v = Vec::from_raw_parts(buf, len + off, cap);
1253
1254 let mut b = BytesMut::from_vec(v);
1255 b.advance_unchecked(off);
1256 b
1257 } else {
1258 let v = slice::from_raw_parts(ptr, len).to_vec();
1260 release_shared(shared);
1261 BytesMut::from_vec(v)
1262 }
1263}
1264
1265unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1266 shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1267}
1268
/// `is_unique` for the shared representation: true iff the refcount is 1.
/// `pub(crate)` because other modules reuse it for shared-backed buffers.
pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

/// `drop` for the shared representation: release one reference.
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}
1280
1281unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
1282 let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
1283
1284 if old_size > usize::MAX >> 1 {
1285 crate::abort();
1286 }
1287
1288 Bytes {
1289 ptr,
1290 len,
1291 data: AtomicPtr::new(shared as _),
1292 vtable: &SHARED_VTABLE,
1293 }
1294}
1295
/// First clone of a vec-backed (`KIND_VEC`) handle: promotes the buffer to
/// the ref-counted `Shared` representation, racing concurrent cloners via a
/// CAS on `atom` (the handle's `data` word). Cold: runs at most once per
/// buffer.
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    // Refcount starts at 2: one for the existing handle, one for the clone
    // we are about to return.
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    // The low bit of `data` is the kind tag; a `Box<Shared>` pointer must
    // have it clear (see the alignment assertion on `Shared`).
    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    // Publish our `Shared`; failure means another thread promoted first and
    // `actual` is their `Shared` pointer.
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // We won the race: hand out the second reference counted above.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // Lost the race: free our Box *without* running Shared::drop
            // (the winner's `Shared` now owns `buf`), then clone from theirs.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}
1367
/// Drops one reference to `ptr`, freeing the `Shared` (and, via its Drop,
/// the buffer) when the count reaches zero.
unsafe fn release_shared(ptr: *mut Shared) {
    // `Release` orders this handle's prior accesses before the decrement.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // Acquire fence (expressed as a load) pairs with the other handles'
    // Release decrements, so their accesses happen-before the free below.
    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}
1399
/// Applies `f` to the address of `ptr` and returns the adjusted pointer.
///
/// Miri build: preserve pointer provenance by expressing the new address as
/// a `wrapping_add` offset from the original pointer instead of an
/// int-to-pointer cast.
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

/// Non-miri build: plain usize round-trip of the address.
#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}
1426
// NOTE(review): empty stubs — presumably anchors for doctests exercising the
// #[must_use] attributes on `split_to`/`split_off`; confirm against the
// original documentation comments.
fn _split_to_must_use() {}

fn _split_off_must_use() {}
1448
// Loom-based concurrency model test: exercises the KIND_VEC -> Shared
// promotion race by cloning the same vec-backed `Bytes` from two threads.
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            // Two handles to the same vec-backed Bytes.
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                // The clone must still view the same buffer address.
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}