@@ -982,6 +982,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
         // overflow.
         let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+        // Used in the drop guard below.
+        let old_head = self.head;
 
         if self.len == 0 {
             self.head = 0;
@@ -1034,12 +1036,74 @@ impl<T, A: Allocator> VecDeque<T, A> {
             }
             self.head = new_head;
         }
-        self.buf.shrink_to_fit(target_cap);
+
+        struct Guard<'a, T, A: Allocator> {
+            deque: &'a mut VecDeque<T, A>,
+            old_head: usize,
+            target_cap: usize,
+        }
+
+        impl<T, A: Allocator> Drop for Guard<'_, T, A> {
+            #[cold]
+            fn drop(&mut self) {
+                unsafe {
+                    // SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
+                    // which is the only time it's safe to call `abort_shrink`.
+                    self.deque.abort_shrink(self.old_head, self.target_cap)
+                }
+            }
+        }
+
+        let guard = Guard { deque: self, old_head, target_cap };
+
+        guard.deque.buf.shrink_to_fit(target_cap);
+
+        // Don't drop the guard if we didn't unwind.
+        mem::forget(guard);
 
         debug_assert!(self.head < self.capacity() || self.capacity() == 0);
         debug_assert!(self.len <= self.capacity());
     }
 
+    /// Reverts the deque back into a consistent state in case `shrink_to` failed.
+    /// This is necessary to prevent UB if the backing allocator returns an error
+    /// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
+    ///
+    /// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
+    /// is the capacity that it was trying to shrink to.
+    unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
+        // Moral equivalent of `self.head + self.len <= target_cap`. Won't overflow
+        // because `self.len <= target_cap`.
+        if self.head <= target_cap - self.len {
+            // The deque's buffer is contiguous, so no need to copy anything around.
+            return;
+        }
+
+        // `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
+        let head_len = target_cap - self.head;
+        // `self.head > target_cap - self.len` implies `self.len > target_cap - self.head =: head_len`, so this must be positive.
+        let tail_len = self.len - head_len;
+
+        if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
+            // There's enough spare capacity to copy the tail to the back (because `tail_len <= self.capacity() - target_cap`),
+            // and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).
+
+            unsafe {
+                // The old tail and the new tail can't overlap because the head slice lies between them. The
+                // head slice ends at `target_cap`, so that's where we copy to.
+                self.copy_nonoverlapping(0, target_cap, tail_len);
+            }
+        } else {
+            // Either there's not enough spare capacity to make the deque contiguous, or the head is shorter than the tail
+            // (and therefore hopefully cheaper to copy).
+            unsafe {
+                // The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
+                self.copy(self.head, old_head, head_len);
+                self.head = old_head;
+            }
+        }
+    }
+
     /// Shortens the deque, keeping the first `len` elements and dropping
     /// the rest.
     ///
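
The patch hinges on the classic drop-guard idiom: put the recovery work in a `Drop` impl and `mem::forget` the guard on the success path, so the recovery runs only while unwinding out of `buf.shrink_to_fit`. Below is a minimal, self-contained sketch of the same idiom; the `with_unwind_hook` helper and its closures are illustrative names, not part of the patch:

```rust
use std::mem;
use std::panic;

/// Runs `f`; if `f` unwinds, runs `on_unwind` before the panic keeps
/// propagating. This mirrors the `Guard`/`mem::forget` shape in the patch.
fn with_unwind_hook<T>(f: impl FnOnce() -> T, on_unwind: impl FnOnce()) -> T {
    struct Guard<F: FnOnce()>(Option<F>);

    impl<F: FnOnce()> Drop for Guard<F> {
        fn drop(&mut self) {
            // Only reached while unwinding out of `f`: the success path
            // below forgets the guard before it can ever be dropped.
            if let Some(hook) = self.0.take() {
                hook();
            }
        }
    }

    let guard = Guard(Some(on_unwind));
    let result = f();
    // Success: defuse the guard so the hook never runs.
    mem::forget(guard);
    result
}

fn main() {
    let caught = panic::catch_unwind(|| {
        with_unwind_hook(
            || {
                panic!("pretend the allocator failed and `handle_alloc_error` unwound");
            },
            || eprintln!("restoring a consistent state"),
        );
    });
    // The hook ran during unwinding, so the state is consistent again.
    assert!(caught.is_err());
}
```

The `#[cold]` attribute on the real guard's `drop` plays the same role as keeping the hook off the happy path here: it tells the compiler the unwind path is rarely taken.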
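To make the index arithmetic in `abort_shrink` concrete, here is a worked example with made-up numbers (a capacity-16 buffer shrinking toward 8, chosen to hit the copy-the-tail branch); it mirrors only the arithmetic, not the real buffer moves:

```rust
fn main() {
    // Hypothetical numbers, chosen to exercise the "copy the tail" branch.
    let capacity: usize = 16; // real buffer capacity while `shrink_to_fit` unwinds
    let target_cap: usize = 8; // capacity `shrink_to` was aiming for
    let old_head: usize = 13; // head index before `shrink_to` ran
    let len: usize = 5; // 3 head elements in [13, 16), 2 tail elements in [0, 2)

    // `shrink_to` has already moved the head slice so that it ends at
    // `target_cap`: the head elements now live in [5, 8).
    let head = target_cap - (capacity - old_head); // 5

    // Danger: with the old capacity still in place, `head = 5, len = 5` would
    // be read as a contiguous run [5, 10), but [8, 10) holds garbage. That is
    // the inconsistent state `abort_shrink` repairs.

    // `abort_shrink`'s arithmetic:
    assert!(head > target_cap - len); // 5 > 3: the buffer is wrapped
    let head_len = target_cap - head; // 3 elements in [5, 8)
    let tail_len = len - head_len; // 2 elements in [0, 2)

    // 2 <= min(3, 8): copy the tail to [8, 10), yielding a contiguous
    // run [5, 10); `head` stays at 5 and the deque is consistent again.
    assert!(tail_len <= head_len.min(capacity - target_cap));
}
```

Had the example instead started with `old_head = 15` and `len = 7`, `tail_len` (6) would exceed `head_len` (1), so `abort_shrink` would take the other branch: copy the one-element head slice back to index 15 with `self.copy` and reset `self.head = old_head`, restoring the pre-`shrink_to` layout.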