
Commit 4d75327

safe
1 parent ba6d0fd commit 4d75327


3 files changed: +20 −159 lines changed

src/decoding/decodebuffer.rs

+2 −30
@@ -107,20 +107,7 @@ impl DecodeBuffer {
             // We need to copy in chunks.
             self.repeat_in_chunks(offset, match_length, start_idx);
         } else {
-            // can just copy parts of the existing buffer
-            // SAFETY: Requirements checked:
-            // 1. start_idx + match_length must be <= self.buffer.len()
-            // We know that:
-            // 1. start_idx = self.buffer.len() - offset
-            // 2. end_idx = start_idx + match_length
-            // 3. end_idx <= self.buffer.len()
-            // Thus follows: start_idx + match_length <= self.buffer.len()
-            //
-            // 2. explicitly reserved enough memory for the whole match_length
-            unsafe {
-                self.buffer
-                    .extend_from_within_unchecked(start_idx, match_length)
-            };
+            self.buffer.extend_from_within(start_idx, match_length);
         }
 
         self.total_output_counter += match_length as u64;
@@ -137,22 +124,7 @@ impl DecodeBuffer {
         while copied_counter_left > 0 {
             let chunksize = usize::min(offset, copied_counter_left);
 
-            // SAFETY: Requirements checked:
-            // 1. start_idx + chunksize must be <= self.buffer.len()
-            // We know that:
-            // 1. start_idx starts at buffer.len() - offset
-            // 2. chunksize <= offset (== offset for each iteration but the last, and match_length modulo offset in the last iteration)
-            // 3. the buffer grows by offset many bytes each iteration but the last
-            // 4. start_idx is increased by the same amount as the buffer grows each iteration
-            //
-            // Thus follows: start_idx + chunksize == self.buffer.len() in each iteration but the last, where match_length modulo offset == chunksize < offset
-            // Meaning: start_idx + chunksize <= self.buffer.len()
-            //
-            // 2. explicitly reserved enough memory for the whole match_length
-            unsafe {
-                self.buffer
-                    .extend_from_within_unchecked(start_idx, chunksize)
-            };
+            self.buffer.extend_from_within(start_idx, chunksize);
             copied_counter_left -= chunksize;
             start_idx += chunksize;
         }
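
Both hunks above swap the hand-proven `extend_from_within_unchecked` calls for the bounds-checked `extend_from_within`. Below is a minimal, hedged sketch of the same match-copy pattern, written against a plain `Vec<u8>` rather than the crate's actual `DecodeBuffer`/`RingBuffer` types (note that `Vec::extend_from_within` takes a range, while the crate's buffer takes a `(start, len)` pair):

```rust
/// Illustrative stand-in only, not the crate's decode buffer: append
/// `match_length` bytes that start `offset` bytes before the current end.
fn repeat_match(buffer: &mut Vec<u8>, offset: usize, match_length: usize) {
    assert!(offset > 0 && offset <= buffer.len());
    let start_idx = buffer.len() - offset;

    if match_length <= offset {
        // The whole source range is already initialised, so one safe,
        // bounds-checked copy-from-within is enough.
        buffer.extend_from_within(start_idx..start_idx + match_length);
    } else {
        // Overlapping match: copy at most `offset` bytes per step so every
        // chunk reads only data that already exists (mirrors repeat_in_chunks).
        let mut start_idx = start_idx;
        let mut remaining = match_length;
        while remaining > 0 {
            let chunksize = usize::min(offset, remaining);
            buffer.extend_from_within(start_idx..start_idx + chunksize);
            remaining -= chunksize;
            start_idx += chunksize;
        }
    }
}

fn main() {
    let mut buf = b"abc".to_vec();
    repeat_match(&mut buf, 3, 7); // overlapping, LZ-style match
    assert_eq!(buf, b"abcabcabca");
}
```

Semantically nothing changes versus the unchecked version; out-of-range arguments now panic instead of being undefined behaviour.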

src/decoding/ringbuffer.rs

+17 −129
@@ -1,8 +1,8 @@
 use alloc::collections::VecDeque;
-use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice};
+use core::cmp;
 
 pub struct RingBuffer {
-    buf: VecDeque<MaybeUninit<u8>>,
+    buf: VecDeque<u8>,
 }
 
 impl RingBuffer {
@@ -24,12 +24,10 @@ impl RingBuffer {
     }
 
     /// Return the amount of available space (in bytes) of the buffer.
+    #[cfg(test)]
     pub fn free(&self) -> usize {
         let len = self.buf.len();
         let capacity = self.buf.capacity();
-        if len > capacity {
-            unsafe { unreachable_unchecked() }
-        }
 
         capacity - len
     }
@@ -46,41 +44,23 @@ impl RingBuffer {
 
     /// Ensure that there's space for `amount` elements in the buffer.
     pub fn reserve(&mut self, additional: usize) {
-        if self.free() < additional {
-            self.reserve_amortized(additional);
-        }
-
-        if self.free() < additional {
-            unsafe { unreachable_unchecked() }
-        }
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn reserve_amortized(&mut self, additional: usize) {
         self.buf.reserve(additional);
     }
 
     #[allow(dead_code)]
     pub fn push_back(&mut self, byte: u8) {
-        self.reserve(1);
-        self.buf.push_back(MaybeUninit::new(byte));
+        self.buf.push_back(byte);
     }
 
     /// Fetch the byte stored at the selected index from the buffer, returning it, or
     /// `None` if the index is out of bounds.
     #[allow(dead_code)]
     pub fn get(&self, idx: usize) -> Option<u8> {
-        self.buf
-            .get(idx)
-            .map(|&byte| unsafe { MaybeUninit::assume_init(byte) })
+        self.buf.get(idx).copied()
     }
 
     /// Append the provided data to the end of `self`.
     pub fn extend(&mut self, data: &[u8]) {
-        let len = data.len();
-        let data = data.as_ptr().cast::<MaybeUninit<u8>>();
-        let data = unsafe { slice::from_raw_parts(data, len) };
         self.buf.extend(data);
     }
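
With the buffer now a plain `VecDeque<u8>`, the hunk above falls back to the standard safe API for `reserve`, `push_back`, `get`, and `extend`. A small self-contained sketch of those calls (using `std::collections::VecDeque` so it runs on its own; the crate itself pulls `VecDeque` from `alloc` under `no_std`):

```rust
use std::collections::VecDeque;

fn main() {
    // A plain VecDeque<u8> already provides the operations the ring buffer
    // needs, with bounds checks instead of MaybeUninit bookkeeping.
    let mut buf: VecDeque<u8> = VecDeque::new();

    buf.reserve(4); // amortised growth; no dedicated cold path required
    buf.push_back(b'a'); // no MaybeUninit::new wrapper
    buf.extend(b"bcd"); // &[u8] data extends directly, no pointer casts

    // get() yields Option<&u8>; copied() turns it into Option<u8>, replacing assume_init.
    assert_eq!(buf.get(2).copied(), Some(b'c'));

    // Both halves of the ring come back as ordinary initialised slices.
    let (front, back) = buf.as_slices();
    assert_eq!(front.len() + back.len(), 4);
}
```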

@@ -94,16 +74,12 @@ impl RingBuffer {
 
     /// Return references to each part of the ring buffer.
     pub fn as_slices(&self) -> (&[u8], &[u8]) {
-        let (a, b) = self.buf.as_slices();
-
-        (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe {
-            slice_assume_init_ref_polyfill(b)
-        })
+        self.buf.as_slices()
     }
 
     /// Copies elements from the provided range to the end of the buffer.
     #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
         if start + len > self.len() {
             panic!(
                 "Calls to this functions must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -113,43 +89,15 @@ impl RingBuffer {
             );
         }
 
-        self.reserve(len);
-
-        // SAFETY: Requirements checked:
-        // 1. explicitly checked above, resulting in a panic if it does not hold
-        // 2. explicitly reserved enough memory
-        unsafe { self.extend_from_within_unchecked(start, len) }
-    }
-
-    /// Copies data from the provided range to the end of the buffer, without
-    /// first verifying that the unoccupied capacity is available.
-    ///
-    /// SAFETY:
-    /// For this to be safe two requirements need to hold:
-    /// 1. start + len <= self.len() so we do not copy uninitialised memory
-    /// 2. More then len reserved space so we do not write out-of-bounds
-    #[warn(unsafe_op_in_unsafe_fn)]
-    pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) {
-        debug_assert!(start + len <= self.len());
-        debug_assert!(self.free() >= len);
-
-        if self.free() < len {
-            unsafe { unreachable_unchecked() }
-        }
-
         let original_len = self.len();
         let mut intermediate = {
             IntermediateRingBuffer {
                 this: self,
                 original_len,
-                disarmed: false,
             }
         };
 
-        intermediate
-            .this
-            .buf
-            .resize_with(original_len + len, MaybeUninit::uninit);
+        intermediate.this.buf.resize(original_len + len, 0);
         debug_assert_eq!(intermediate.this.buf.len(), original_len + len);
 
         let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
@@ -158,7 +106,7 @@ impl RingBuffer {
         let skip = cmp::min(a.len(), start);
         start -= skip;
         let a = &a[skip..];
-        let b = unsafe { b.get_unchecked(start..) };
+        let b = &b[start..];
 
         let mut remaining_copy_len = len;
 
@@ -168,7 +116,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -181,7 +128,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -193,7 +139,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -205,22 +150,17 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         debug_assert_eq!(remaining_copy_len, 0);
-
-        intermediate.disarmed = true;
     }
 }
 
 struct IntermediateRingBuffer<'a> {
     this: &'a mut RingBuffer,
     original_len: usize,
-    disarmed: bool,
 }
 
 impl<'a> IntermediateRingBuffer<'a> {
     // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(
-        &mut self,
-    ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) {
+    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
         let (a, b) = self.this.buf.as_mut_slices();
         debug_assert!(a.len() + b.len() >= self.original_len);
 
@@ -230,26 +170,11 @@ impl<'a> IntermediateRingBuffer<'a> {
         let b_mid = remaining_init_len;
         debug_assert!(b.len() >= b_mid);
 
-        let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) };
-        let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) };
+        let (a, a_spare) = a.split_at_mut(a_mid);
+        let (b, b_spare) = b.split_at_mut(b_mid);
         debug_assert!(a_spare.is_empty() || b.is_empty());
 
-        (
-            unsafe { slice_assume_init_ref_polyfill(a) },
-            unsafe { slice_assume_init_ref_polyfill(b) },
-            a_spare,
-            b_spare,
-        )
-    }
-}
-
-impl<'a> Drop for IntermediateRingBuffer<'a> {
-    fn drop(&mut self) {
-        if self.disarmed {
-            return;
-        }
-
-        self.this.buf.truncate(self.original_len);
+        (a, b, a_spare, b_spare)
     }
 }
 
@@ -266,48 +191,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> {
 /// The chunk size is not part of the contract and may change depending on the target platform.
 ///
 /// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];
 
-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
 }
 
 #[cfg(test)]
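
Taken together, the remaining ring-buffer hunks replace one unsafe scheme with one safe pattern: grow the deque with `resize(new_len, 0)` so the spare region is ordinary zero-initialised memory, split the initialised part from the spare part with `split_at_mut`, and copy with `copy_from_slice`. That is what makes the `MaybeUninit` plumbing, the panic guard in `Drop`, and the overshooting word-sized copy unnecessary. A hedged, self-contained sketch of the pattern with simplified names (contiguous case only; the crate's version also walks the second slice of the deque):

```rust
use std::collections::VecDeque;

/// Illustrative only: copy `len` bytes starting at `start` onto the end of `buf`.
fn extend_from_within_safe(buf: &mut VecDeque<u8>, start: usize, len: usize) {
    assert!(start + len <= buf.len(), "source range must already be initialised");

    let original_len = buf.len();
    // Zero-fill the new tail: the spare region is real, initialised memory,
    // so no uninitialised bytes can leak and no Drop guard has to truncate
    // the buffer if a copy step panics.
    buf.resize(original_len + len, 0);

    let (a, b) = buf.as_mut_slices();
    // For brevity this sketch handles only the contiguous layout; the crate's
    // as_slices_spare_mut also splits the wrapped-around second slice.
    assert!(b.is_empty(), "sketch assumes a contiguous VecDeque");

    let (init, spare) = a.split_at_mut(original_len);
    // Disjoint borrows, so a plain bounds-checked memcpy does the job that
    // copy_bytes_overshooting previously did with raw pointers.
    spare[..len].copy_from_slice(&init[start..start + len]);
}

fn main() {
    let mut buf: VecDeque<u8> = b"hello ".to_vec().into();
    extend_from_within_safe(&mut buf, 0, 5);
    assert_eq!(buf.iter().copied().collect::<Vec<_>>(), b"hello hello");
}
```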

src/lib.rs

+1
@@ -14,6 +14,7 @@
 //! than the original implementation.
 #![no_std]
 #![deny(trivial_casts, trivial_numeric_casts, rust_2018_idioms)]
+#![forbid(unsafe_code)]
 
 #[cfg(feature = "std")]
 extern crate std;
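
The `forbid` level, unlike `deny`, cannot be re-allowed further down the crate, so this line turns the removal of unsafe code into a compiler-enforced invariant rather than a convention. A tiny standalone illustration (not code from the crate itself):

```rust
// Standalone file mirroring the new crate-level attribute.
#![forbid(unsafe_code)]

fn main() {
    // Any unsafe block in the crate now fails to compile: the unsafe_code lint
    // fires as a hard error, and a local #[allow(unsafe_code)] cannot override
    // the outer forbid. Uncommenting the next line demonstrates this:
    // unsafe { core::hint::unreachable_unchecked() };
    println!("unsafe code cannot be reintroduced");
}
```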
