use alloc::collections::VecDeque;
-use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice};
+use core::cmp;

pub struct RingBuffer {
-    buf: VecDeque<MaybeUninit<u8>>,
+    buf: VecDeque<u8>,
}

impl RingBuffer {
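The heart of this change is the field type: the backing store drops `MaybeUninit<u8>` for plain `u8`, so every slot the `VecDeque` hands out is always initialized and can be read without `unsafe`. A minimal sketch of the resulting layout (field name from the diff; methods elided, `std` used instead of `alloc` for a runnable snippet):

```rust
use std::collections::VecDeque;

// Sketch of the safe layout after this commit: all bytes in `buf`
// are initialized, so reading them back needs no `unsafe`.
struct RingBuffer {
    buf: VecDeque<u8>,
}

fn main() {
    let rb = RingBuffer {
        buf: VecDeque::from(vec![1u8, 2, 3]),
    };
    // Plain, safe indexing — no assume_init required.
    assert_eq!(rb.buf.get(1), Some(&2));
}
```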
@@ -24,12 +24,10 @@ impl RingBuffer {
    }

    /// Return the amount of available space (in bytes) of the buffer.
+    #[cfg(test)]
    pub fn free(&self) -> usize {
        let len = self.buf.len();
        let capacity = self.buf.capacity();
-        if len > capacity {
-            unsafe { unreachable_unchecked() }
-        }

        capacity - len
    }
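`free()` is now compiled only for tests, and the `unreachable_unchecked` hint is gone: `VecDeque` itself guarantees `len() <= capacity()`, so the subtraction cannot underflow. A standalone illustration with hypothetical values:

```rust
use std::collections::VecDeque;

fn main() {
    let mut buf: VecDeque<u8> = VecDeque::with_capacity(16);
    buf.extend([0u8; 10]);
    // `len() <= capacity()` is an invariant VecDeque upholds on its own,
    // so no unsafe hint is needed to make this subtraction sound.
    let free = buf.capacity() - buf.len();
    assert!(free >= 6); // capacity may round up beyond the requested 16
}
```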
@@ -46,41 +44,23 @@ impl RingBuffer {

    /// Ensure that there's space for `additional` elements in the buffer.
    pub fn reserve(&mut self, additional: usize) {
-        if self.free() < additional {
-            self.reserve_amortized(additional);
-        }
-
-        if self.free() < additional {
-            unsafe { unreachable_unchecked() }
-        }
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn reserve_amortized(&mut self, additional: usize) {
        self.buf.reserve(additional);
    }

    #[allow(dead_code)]
    pub fn push_back(&mut self, byte: u8) {
-        self.reserve(1);
-        self.buf.push_back(MaybeUninit::new(byte));
+        self.buf.push_back(byte);
    }

    /// Fetch the byte stored at the selected index from the buffer, returning it, or
    /// `None` if the index is out of bounds.
    #[allow(dead_code)]
    pub fn get(&self, idx: usize) -> Option<u8> {
-        self.buf
-            .get(idx)
-            .map(|&byte| unsafe { MaybeUninit::assume_init(byte) })
+        self.buf.get(idx).copied()
    }

    /// Append the provided data to the end of `self`.
    pub fn extend(&mut self, data: &[u8]) {
-        let len = data.len();
-        let data = data.as_ptr().cast::<MaybeUninit<u8>>();
-        let data = unsafe { slice::from_raw_parts(data, len) };
        self.buf.extend(data);
    }
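With initialized bytes in the deque, `get` collapses to `Option::copied`, which turns `Option<&u8>` into `Option<u8>` with no `unsafe`, and `extend` no longer needs the pointer-cast round trip. A quick demonstration of the `copied()` idiom:

```rust
fn main() {
    let buf = std::collections::VecDeque::from(vec![10u8, 20, 30]);
    // `copied()` replaces the old `map(|&b| unsafe { assume_init(b) })` chain.
    assert_eq!(buf.get(0).copied(), Some(10));
    assert_eq!(buf.get(9).copied(), None); // out of bounds -> None
}
```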
@@ -94,16 +74,12 @@ impl RingBuffer {

    /// Return references to each part of the ring buffer.
    pub fn as_slices(&self) -> (&[u8], &[u8]) {
-        let (a, b) = self.buf.as_slices();
-
-        (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe {
-            slice_assume_init_ref_polyfill(b)
-        })
+        self.buf.as_slices()
    }

    /// Copies elements from the provided range to the end of the buffer.
    #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
        if start + len > self.len() {
            panic!(
                "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -113,43 +89,15 @@ impl RingBuffer {
        );
    }

-        self.reserve(len);
-
-        // SAFETY: Requirements checked:
-        // 1. explicitly checked above, resulting in a panic if it does not hold
-        // 2. explicitly reserved enough memory
-        unsafe { self.extend_from_within_unchecked(start, len) }
-    }
-
-    /// Copies data from the provided range to the end of the buffer, without
-    /// first verifying that the unoccupied capacity is available.
-    ///
-    /// SAFETY:
-    /// For this to be safe two requirements need to hold:
-    /// 1. start + len <= self.len() so we do not copy uninitialised memory
-    /// 2. More than len reserved space so we do not write out-of-bounds
-    #[warn(unsafe_op_in_unsafe_fn)]
-    pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) {
-        debug_assert!(start + len <= self.len());
-        debug_assert!(self.free() >= len);
-
-        if self.free() < len {
-            unsafe { unreachable_unchecked() }
-        }
-
        let original_len = self.len();
        let mut intermediate = {
            IntermediateRingBuffer {
                this: self,
                original_len,
-                disarmed: false,
            }
        };

-        intermediate
-            .this
-            .buf
-            .resize_with(original_len + len, MaybeUninit::uninit);
+        intermediate.this.buf.resize(original_len + len, 0);
        debug_assert_eq!(intermediate.this.buf.len(), original_len + len);

        let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
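`resize` fills the new tail with zeroes instead of leaving it uninitialized via `resize_with(..., MaybeUninit::uninit)`; the copy loop in the hunks below then overwrites those zeroes with the real data. A small sketch of that idiom in isolation:

```rust
use std::collections::VecDeque;

fn main() {
    let mut buf: VecDeque<u8> = VecDeque::from(vec![1, 2, 3]);
    let original_len = buf.len();
    // Grow by 4 zeroed bytes; slightly more work than leaving the tail
    // uninitialized, but every byte is now valid to read at all times.
    buf.resize(original_len + 4, 0);
    assert_eq!(buf.len(), original_len + 4);
    assert_eq!(buf[original_len], 0);
}
```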
@@ -158,7 +106,7 @@ impl RingBuffer {
        let skip = cmp::min(a.len(), start);
        start -= skip;
        let a = &a[skip..];
-        let b = unsafe { b.get_unchecked(start..) };
+        let b = &b[start..];

        let mut remaining_copy_len = len;

@@ -168,7 +116,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -181,7 +128,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -193,7 +139,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -205,22 +150,17 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        debug_assert_eq!(remaining_copy_len, 0);
-
-        intermediate.disarmed = true;
    }
}

struct IntermediateRingBuffer<'a> {
    this: &'a mut RingBuffer,
    original_len: usize,
-    disarmed: bool,
}

impl<'a> IntermediateRingBuffer<'a> {
    // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(
-        &mut self,
-    ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) {
+    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
        let (a, b) = self.this.buf.as_mut_slices();
        debug_assert!(a.len() + b.len() >= self.original_len);

@@ -230,26 +170,11 @@ impl<'a> IntermediateRingBuffer<'a> {
        let b_mid = remaining_init_len;
        debug_assert!(b.len() >= b_mid);

-        let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) };
-        let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) };
+        let (a, a_spare) = a.split_at_mut(a_mid);
+        let (b, b_spare) = b.split_at_mut(b_mid);
        debug_assert!(a_spare.is_empty() || b.is_empty());

-        (
-            unsafe { slice_assume_init_ref_polyfill(a) },
-            unsafe { slice_assume_init_ref_polyfill(b) },
-            a_spare,
-            b_spare,
-        )
-    }
-}
-
-impl<'a> Drop for IntermediateRingBuffer<'a> {
-    fn drop(&mut self) {
-        if self.disarmed {
-            return;
-        }
-
-        self.this.buf.truncate(self.original_len);
+        (a, b, a_spare, b_spare)
    }
}

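The deleted `Drop` impl was a panic guard: while the tail still held uninitialized bytes, an unwind had to truncate the buffer back to `original_len` so no caller could observe them, and `disarmed` switched the guard off once the copy completed. With the tail zero-filled up front, a panic mid-copy leaves only harmless zeroes behind, so the guard has no job left. A reconstruction of the removed pattern, for illustration only (simplified to `Vec<u8>`):

```rust
// Sketch of the old rollback guard; the real type wrapped the RingBuffer.
struct Guard<'a> {
    buf: &'a mut Vec<u8>,
    original_len: usize,
    disarmed: bool,
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        // Unless explicitly disarmed, roll back to the known-good length
        // so partially written (uninitialized) bytes never become visible.
        if !self.disarmed {
            self.buf.truncate(self.original_len);
        }
    }
}

fn main() {
    let mut data = vec![1u8, 2, 3];
    {
        let g = Guard { buf: &mut data, original_len: 3, disarmed: false };
        g.buf.extend_from_slice(&[0, 0]); // simulated half-finished copy
        // `g` is dropped here without being disarmed -> rollback.
    }
    assert_eq!(data, [1, 2, 3]);
}
```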
@@ -266,48 +191,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> {
/// The chunk size is not part of the contract and may change depending on the target platform.
///
/// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];

-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
}

#[cfg(test)]
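Despite its name, `copy_bytes_overshooting` no longer overshoots at all: both sides are sliced to exactly `copy_at_least` and `copy_from_slice` performs the transfer (it panics if the lengths differ, which the slicing rules out). The `usize`-chunked overshooting trick may have been faster, but correctness is now enforced by the borrow checker instead of manual asserts. A minimal standalone version of the new behavior:

```rust
fn copy_bytes(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
    // Both reslices panic (safely) if `copy_at_least` is out of bounds.
    let src = &src[..copy_at_least];
    let dst = &mut dst[..copy_at_least];
    dst.copy_from_slice(src);
}

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 8];
    copy_bytes(&src, &mut dst, 3);
    assert_eq!(&dst[..4], &[1, 2, 3, 0]); // only the first 3 bytes copied
}
```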