// /home/runner/work/ringbuffer/ringbuffer/src/ringbuffer_trait.rs
use core::ops::{Index, IndexMut};

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;

/// `RingBuffer` is a trait defining the standard interface for all `RingBuffer`
/// implementations ([`AllocRingBuffer`](crate::AllocRingBuffer), [`ConstGenericRingBuffer`](crate::ConstGenericRingBuffer)).
///
/// This trait is not object safe, so it can't be used dynamically. However, it is possible to
/// define a generic function over types implementing `RingBuffer`, as sketched below.
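///
/// A minimal sketch of such a generic function (illustrative only; `second_oldest`
/// is a hypothetical helper, and the example assumes the `alloc` feature for
/// [`AllocRingBuffer`](crate::AllocRingBuffer)):
///
/// ```
/// use ringbuffer::{AllocRingBuffer, RingBuffer};
///
/// fn second_oldest<T>(rb: &impl RingBuffer<T>) -> Option<&T> {
///     rb.get(1)
/// }
///
/// let mut rb = AllocRingBuffer::new(4);
/// rb.enqueue(10);
/// rb.enqueue(20);
/// assert_eq!(second_oldest(&rb), Some(&20));
/// ```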
///
/// # Safety
/// Implementing this trait implies that the ringbuffer upholds some safety
/// guarantees, such as returning a different value from `get_mut` for every
/// different index passed in. See the exact requirements in the safety comment
/// on the `next` function of the mutable `Iterator` implementation, since these
/// safety guarantees are necessary for [`iter_mut`](RingBuffer::iter_mut) to work.
pub unsafe trait RingBuffer<T>:
    Sized + IntoIterator<Item = T> + Extend<T> + Index<usize, Output = T> + IndexMut<usize>
{
    /// Returns the length of the internal buffer.
    /// This length grows up to the capacity and then stops growing.
    /// This is because once the capacity is reached, newly pushed items overwrite the items pushed the longest ago.
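    ///
    /// A minimal sketch of this behaviour (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(2);
    /// rb.enqueue(1);
    /// assert_eq!(rb.len(), 1);
    /// rb.enqueue(2);
    /// rb.enqueue(3); // wraps around; the length stays at capacity
    /// assert_eq!(rb.len(), 2);
    /// ```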
    fn len(&self) -> usize {
        // Safety: self is a RingBuffer
        unsafe { Self::ptr_len(self) }
    }

    /// Raw pointer version of `len`.
    ///
    /// # Safety
    /// ONLY SAFE WHEN `rb` is a `*const` to an implementor of `RingBuffer`
    #[doc(hidden)]
    unsafe fn ptr_len(rb: *const Self) -> usize;

    /// Returns true if the buffer is entirely empty.
    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns true when the length of the ringbuffer equals the capacity. This happens whenever
    /// more elements than capacity have been pushed to the buffer.
    #[inline]
    fn is_full(&self) -> bool {
        self.len() == self.capacity()
    }

    /// Returns the capacity of the buffer.
    fn capacity(&self) -> usize {
        // Safety: self is a RingBuffer
        unsafe { Self::ptr_capacity(self) }
    }

    /// Returns the number of elements allocated for this ringbuffer (can be larger than capacity).
    fn buffer_size(&self) -> usize {
        // Safety: self is a RingBuffer
        unsafe { Self::ptr_buffer_size(self) }
    }

    /// Raw pointer version of `capacity`.
    ///
    /// # Safety
    /// ONLY SAFE WHEN `rb` is a `*const` to an implementor of `RingBuffer`
    #[doc(hidden)]
    unsafe fn ptr_capacity(rb: *const Self) -> usize;

    /// Raw pointer version of `buffer_size`.
    ///
    /// # Safety
    /// ONLY SAFE WHEN `rb` is a `*const` to an implementor of `RingBuffer`
    #[doc(hidden)]
    unsafe fn ptr_buffer_size(rb: *const Self) -> usize;

    /// Alias for [`enqueue`](RingBuffer::enqueue).
    #[deprecated = "use enqueue instead"]
    #[inline]
    fn push(&mut self, value: T) {
        let _ = self.enqueue(value);
    }

    /// Adds a value onto the buffer.
    ///
    /// Cycles around if capacity is reached: if the buffer is full, the oldest
    /// value is overwritten and returned.
    /// Forms a more natural counterpart to [`dequeue`](RingBuffer::dequeue).
    /// An alias is provided with [`push`](RingBuffer::push).
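    ///
    /// A short sketch of the behaviour when full (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(2);
    /// assert_eq!(rb.enqueue(1), None);
    /// assert_eq!(rb.enqueue(2), None);
    /// // full: the oldest element (1) is overwritten and handed back
    /// assert_eq!(rb.enqueue(3), Some(1));
    /// ```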
    fn enqueue(&mut self, value: T) -> Option<T>;

    /// Dequeues the top item off the ringbuffer and moves this item out, returning it.
    /// Returns `None` if the ringbuffer is empty.
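    ///
    /// A minimal sketch (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(1);
    /// rb.enqueue(2);
    /// assert_eq!(rb.dequeue(), Some(1)); // the oldest item comes out first
    /// assert_eq!(rb.dequeue(), Some(2));
    /// assert_eq!(rb.dequeue(), None); // empty
    /// ```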
    fn dequeue(&mut self) -> Option<T>;

    /// Dequeues the top item off the queue, but does not return it. Instead it is dropped.
    /// If the ringbuffer is empty, this function is a no-op.
    #[inline]
    #[deprecated = "use dequeue instead"]
    fn skip(&mut self) {
        let _ = self.dequeue();
    }

    /// Returns an iterator over the elements in the ringbuffer,
    /// dequeueing elements as they are iterated over.
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(16);
    /// for i in 0..8 {
    ///     rb.enqueue(i);
    /// }
    ///
    /// assert_eq!(rb.len(), 8);
    ///
    /// for i in rb.drain() {
    ///     // prints the numbers 0 through 7
    ///     println!("{}", i);
    /// }
    ///
    /// // No elements remain
    /// assert_eq!(rb.len(), 0);
    /// ```
    fn drain(&mut self) -> RingBufferDrainingIterator<T, Self> {
        RingBufferDrainingIterator::new(self)
    }

    /// Sets every element in the ringbuffer to the value returned by `f`.
    fn fill_with<F: FnMut() -> T>(&mut self, f: F);

    /// Sets every element in the ringbuffer to its default value.
    fn fill_default(&mut self)
    where
        T: Default,
    {
        self.fill_with(Default::default);
    }

    /// Sets every element in the ringbuffer to `value`.
    fn fill(&mut self, value: T)
    where
        T: Clone,
    {
        self.fill_with(|| value.clone());
    }

    /// Empties the buffer entirely. Sets the length to 0 but keeps the capacity allocated.
    fn clear(&mut self);

    /// Gets a value relative to the current index. 0 is the next index to be written to with push.
    /// -1 and down are the last elements pushed, and 0 and up are the items that were pushed the longest ago.
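    ///
    /// A minimal sketch of the indexing scheme (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(10);
    /// rb.enqueue(20);
    /// assert_eq!(rb.get_signed(0), Some(&10)); // pushed the longest ago
    /// assert_eq!(rb.get_signed(-1), Some(&20)); // pushed most recently
    /// ```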
    fn get_signed(&self, index: isize) -> Option<&T>;

    /// Gets a value relative to the current index. 0 is the next index to be written to with push.
    fn get(&self, index: usize) -> Option<&T>;

    /// Gets a value relative to the current index mutably. 0 is the next index to be written to with push.
    /// -1 and down are the last elements pushed, and 0 and up are the items that were pushed the longest ago.
    #[inline]
    fn get_mut_signed(&mut self, index: isize) -> Option<&mut T> {
        // Safety: self is a RingBuffer
        unsafe { Self::ptr_get_mut_signed(self, index).map(|i| &mut *i) }
    }

    /// Gets a value relative to the current index mutably. 0 is the next index to be written to with push.
    #[inline]
    fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        // Safety: self is a RingBuffer
        unsafe { Self::ptr_get_mut(self, index).map(|i| &mut *i) }
    }

    /// Same as [`get_mut`](RingBuffer::get_mut) but on raw pointers.
    ///
    /// # Safety
    /// ONLY SAFE WHEN `rb` is a `*mut` to an implementor of `RingBuffer`
    #[doc(hidden)]
    unsafe fn ptr_get_mut(rb: *mut Self, index: usize) -> Option<*mut T>;

    /// Same as [`get_mut_signed`](RingBuffer::get_mut_signed) but on raw pointers.
    ///
    /// # Safety
    /// ONLY SAFE WHEN `rb` is a `*mut` to an implementor of `RingBuffer`
    #[doc(hidden)]
    unsafe fn ptr_get_mut_signed(rb: *mut Self, index: isize) -> Option<*mut T>;

    /// Returns the value at the current index.
    /// This is the value that will be overwritten by the next push and also the value pushed
    /// the longest ago. (alias of [`Self::front`])
    #[inline]
    fn peek(&self) -> Option<&T> {
        self.front()
    }

    /// Returns the value at the front of the queue.
    /// This is the value that will be overwritten by the next push and also the value pushed
    /// the longest ago.
    /// (alias of [`peek`](RingBuffer::peek))
    #[inline]
    fn front(&self) -> Option<&T> {
        self.get(0)
    }

    /// Returns a mutable reference to the value at the front of the queue.
    /// This is the value that will be overwritten by the next push and also the value pushed
    /// the longest ago.
    #[inline]
    fn front_mut(&mut self) -> Option<&mut T> {
        self.get_mut(0)
    }

    /// Returns the value at the back of the queue.
    /// This is the item that was pushed most recently.
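    ///
    /// A minimal sketch contrasting [`front`](RingBuffer::front) and `back`
    /// (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue('a');
    /// rb.enqueue('b');
    /// assert_eq!(rb.front(), Some(&'a')); // oldest
    /// assert_eq!(rb.back(), Some(&'b')); // most recent
    /// ```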
    #[inline]
    fn back(&self) -> Option<&T> {
        self.get_signed(-1)
    }

    /// Returns a mutable reference to the value at the back of the queue.
    /// This is the item that was pushed most recently.
    #[inline]
    fn back_mut(&mut self) -> Option<&mut T> {
        self.get_mut_signed(-1)
    }

    /// Creates a mutable iterator over the buffer starting from the item pushed the longest ago,
    /// and ending at the element most recently pushed.
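    ///
    /// A minimal sketch (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(1);
    /// rb.enqueue(2);
    /// for elem in rb.iter_mut() {
    ///     *elem *= 10;
    /// }
    /// assert_eq!(rb.to_vec(), vec![10, 20]);
    /// ```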
    #[inline]
    fn iter_mut(&mut self) -> RingBufferMutIterator<T, Self> {
        RingBufferMutIterator::new(self)
    }

    /// Creates an iterator over the buffer starting from the item pushed the longest ago,
    /// and ending at the element most recently pushed.
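    ///
    /// A minimal sketch (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(1);
    /// rb.enqueue(2);
    /// let sum: i32 = rb.iter().sum();
    /// assert_eq!(sum, 3);
    /// ```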
    #[inline]
    fn iter(&self) -> RingBufferIterator<T, Self> {
        RingBufferIterator::new(self)
    }

    /// Converts the buffer to a vector. This clones all elements in the ringbuffer.
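    ///
    /// A minimal sketch (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(1);
    /// rb.enqueue(2);
    /// assert_eq!(rb.to_vec(), vec![1, 2]); // oldest first
    /// ```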
    #[cfg(feature = "alloc")]
    fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.iter().cloned().collect()
    }

    /// Returns true if `elem` is in the ringbuffer.
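    ///
    /// A minimal sketch (illustrative; assumes the `alloc` feature):
    ///
    /// ```
    /// use ringbuffer::{AllocRingBuffer, RingBuffer};
    ///
    /// let mut rb = AllocRingBuffer::new(4);
    /// rb.enqueue(1);
    /// assert!(rb.contains(&1));
    /// assert!(!rb.contains(&2));
    /// ```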
    fn contains(&self, elem: &T) -> bool
    where
        T: PartialEq,
    {
        self.iter().any(|i| i == elem)
    }
}

mod iter {
    use crate::RingBuffer;
    use core::iter::FusedIterator;
    use core::marker::PhantomData;
    use core::ptr::NonNull;

    /// `RingBufferIterator` holds a reference to a `RingBuffer` and iterates over it. `index` is the
    /// current iterator position.
    pub struct RingBufferIterator<'rb, T, RB: RingBuffer<T>> {
        obj: &'rb RB,
        len: usize,
        index: usize,
        phantom: PhantomData<T>,
    }

    impl<'rb, T, RB: RingBuffer<T>> RingBufferIterator<'rb, T, RB> {
        #[inline]
        pub fn new(obj: &'rb RB) -> Self {
            Self {
                obj,
                len: obj.len(),
                index: 0,
                phantom: PhantomData,
            }
        }
    }

    impl<'rb, T: 'rb, RB: RingBuffer<T>> Iterator for RingBufferIterator<'rb, T, RB> {
        type Item = &'rb T;

        #[inline]
        fn next(&mut self) -> Option<Self::Item> {
            if self.index < self.len {
                let res = self.obj.get(self.index);
                self.index += 1;
                res
            } else {
                None
            }
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (self.len, Some(self.len))
        }
    }

    impl<'rb, T: 'rb, RB: RingBuffer<T>> FusedIterator for RingBufferIterator<'rb, T, RB> {}

    impl<'rb, T: 'rb, RB: RingBuffer<T>> ExactSizeIterator for RingBufferIterator<'rb, T, RB> {}

    impl<'rb, T: 'rb, RB: RingBuffer<T>> DoubleEndedIterator for RingBufferIterator<'rb, T, RB> {
        #[inline]
        fn next_back(&mut self) -> Option<Self::Item> {
            if self.len > 0 && self.index < self.len {
                let res = self.obj.get(self.len - 1);
                self.len -= 1;
                res
            } else {
                None
            }
        }
    }

    /// `RingBufferMutIterator` holds a reference to a `RingBuffer` and iterates over it. `index` is the
    /// current iterator position.
    ///
    /// WARNING: NEVER ACCESS THE `obj` FIELD OUTSIDE OF `next()`. It is private on purpose, but
    /// can technically still be accessed from within this module; doing so breaks the safety of `next()`.
    pub struct RingBufferMutIterator<'rb, T, RB: RingBuffer<T>> {
        obj: NonNull<RB>,
        index: usize,
        len: usize,
        phantom: PhantomData<&'rb mut T>,
    }

    impl<'rb, T, RB: RingBuffer<T>> RingBufferMutIterator<'rb, T, RB> {
        pub fn new(obj: &'rb mut RB) -> Self {
            Self {
                len: obj.len(),
                obj: NonNull::from(obj),
                index: 0,
                phantom: PhantomData,
            }
        }
    }

    impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> FusedIterator for RingBufferMutIterator<'rb, T, RB> {}

    impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> ExactSizeIterator for RingBufferMutIterator<'rb, T, RB> {}

    impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> DoubleEndedIterator
        for RingBufferMutIterator<'rb, T, RB>
    {
        #[inline]
        fn next_back(&mut self) -> Option<Self::Item> {
            if self.len > 0 && self.index < self.len {
                self.len -= 1;
                let res = unsafe { RB::ptr_get_mut(self.obj.as_ptr(), self.len) };
                // Safety: ptr_get_mut always returns a valid pointer
                res.map(|i| unsafe { &mut *i })
            } else {
                None
            }
        }
    }

    impl<'rb, T, RB: RingBuffer<T> + 'rb> Iterator for RingBufferMutIterator<'rb, T, RB> {
        type Item = &'rb mut T;

        fn next(&mut self) -> Option<Self::Item> {
            if self.index < self.len {
                let res = unsafe { RB::ptr_get_mut(self.obj.as_ptr(), self.index) };
                self.index += 1;
                // Safety: ptr_get_mut always returns a valid pointer
                res.map(|i| unsafe { &mut *i })
            } else {
                None
            }
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (self.len, Some(self.len))
        }
    }

    /// `RingBufferDrainingIterator` holds a mutable reference to a `RingBuffer` and dequeues
    /// elements as it iterates over them.
    pub struct RingBufferDrainingIterator<'rb, T, RB: RingBuffer<T>> {
        obj: &'rb mut RB,
        phantom: PhantomData<T>,
    }

    impl<'rb, T, RB: RingBuffer<T>> RingBufferDrainingIterator<'rb, T, RB> {
        #[inline]
        pub fn new(obj: &'rb mut RB) -> Self {
            Self {
                obj,
                phantom: PhantomData,
            }
        }
    }

    impl<T, RB: RingBuffer<T>> Iterator for RingBufferDrainingIterator<'_, T, RB> {
        type Item = T;

        fn next(&mut self) -> Option<T> {
            self.obj.dequeue()
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (self.obj.len(), Some(self.obj.len()))
        }
    }

    /// `RingBufferIntoIterator` holds a `RingBuffer` and iterates over it.
    pub struct RingBufferIntoIterator<T, RB: RingBuffer<T>> {
        obj: RB,
        phantom: PhantomData<T>,
    }

    impl<T, RB: RingBuffer<T>> RingBufferIntoIterator<T, RB> {
        #[inline]
        pub fn new(obj: RB) -> Self {
            Self {
                obj,
                phantom: PhantomData,
            }
        }
    }

    impl<T, RB: RingBuffer<T>> Iterator for RingBufferIntoIterator<T, RB> {
        type Item = T;

        #[inline]
        fn next(&mut self) -> Option<Self::Item> {
            self.obj.dequeue()
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            (self.obj.len(), Some(self.obj.len()))
        }
    }
}

pub use iter::{
    RingBufferDrainingIterator, RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator,
};

/// Implement various functions on implementors of [`RingBuffer`].
/// This is to avoid duplicate code.
macro_rules! impl_ringbuffer {
    ($readptr: ident, $writeptr: ident) => {
        #[inline]
        unsafe fn ptr_len(rb: *const Self) -> usize {
            (*rb).$writeptr - (*rb).$readptr
        }
    };
}

/// Implement various functions on implementors of [`RingBuffer`].
/// This is to avoid duplicate code.
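///
/// The index math in `get_signed`/`ptr_get_mut_signed` below relies on `rem_euclid`,
/// which, unlike `%`, always yields a non-negative remainder. A worked sketch with
/// made-up numbers (readptr = 6, len = 4, signed index = -1, i.e. the most recent
/// element):
///
/// ```
/// let index_from_readptr = 4i64 + (-1); // len + index = 3
/// let normalized_index = 6 + index_from_readptr.rem_euclid(4); // 6 + 3 = 9
/// assert_eq!(normalized_index, 9); // `$mask` then wraps this into the buffer
/// ```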
macro_rules! impl_ringbuffer_ext {
    ($get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => {
        #[inline]
        fn get_signed(&self, index: isize) -> Option<&T> {
            use core::ops::Not;
            self.is_empty().not().then(move || {
                let index_from_readptr = if index >= 0 {
                    index
                } else {
                    self.len() as isize + index
                };

                let normalized_index =
                    self.$readptr as isize + index_from_readptr.rem_euclid(self.len() as isize);

                unsafe {
                    // SAFETY: index has been modulo-ed to be within bounds
                    $get_unchecked(self, $mask(self.buffer_size(), normalized_index as usize))
                }
            })
        }

        #[inline]
        fn get(&self, index: usize) -> Option<&T> {
            use core::ops::Not;
            self.is_empty().not().then(move || {
                let normalized_index = self.$readptr + index.rem_euclid(self.len());
                unsafe {
                    // SAFETY: index has been modulo-ed to be within bounds
                    $get_unchecked(self, $mask(self.buffer_size(), normalized_index))
                }
            })
        }

        #[inline]
        #[doc(hidden)]
        unsafe fn ptr_get_mut_signed(rb: *mut Self, index: isize) -> Option<*mut T> {
            (Self::ptr_len(rb) != 0).then(move || {
                let index_from_readptr = if index >= 0 {
                    index
                } else {
                    Self::ptr_len(rb) as isize + index
                };

                let normalized_index = (*rb).$readptr as isize
                    + index_from_readptr.rem_euclid(Self::ptr_len(rb) as isize);

                unsafe {
                    // SAFETY: index has been modulo-ed to be within bounds
                    $get_unchecked_mut(
                        rb,
                        $mask(Self::ptr_buffer_size(rb), normalized_index as usize),
                    )
                }
            })
        }

        #[inline]
        #[doc(hidden)]
        unsafe fn ptr_get_mut(rb: *mut Self, index: usize) -> Option<*mut T> {
            (Self::ptr_len(rb) != 0).then(move || {
                let normalized_index = (*rb).$readptr + index.rem_euclid(Self::ptr_len(rb));

                unsafe {
                    // SAFETY: index has been modulo-ed to be within bounds
                    $get_unchecked_mut(rb, $mask(Self::ptr_buffer_size(rb), normalized_index))
                }
            })
        }

        #[inline]
        fn clear(&mut self) {
            for i in self.drain() {
                drop(i);
            }

            self.$readptr = 0;
            self.$writeptr = 0;
        }
    };
}