Location: CSY/reowolf/src/collections/raw_vec.rs @ 1aef293674a6
experimenting with multithreaded scheduler sync primitives
use std::{mem, ptr, cmp};
use std::alloc::{Layout, alloc, dealloc, handle_alloc_error};

#[derive(Debug)]
enum AllocError {
    CapacityOverflow,
}

/// Generic raw vector. It has a base pointer, a capacity and a length. Basic
/// operations are supported, but the user of the structure is responsible for
/// ensuring that no illegal mutable access occurs.
///
/// A lot of the logic is simply stolen from the std lib. The destructor will
/// free the backing memory, but will not run the destructors of the stored
/// elements.
pub struct RawVec<T: Sized> {
    base: *mut T,
    cap: usize,
    len: usize,
}

impl<T: Sized> RawVec<T> {
    const T_ALIGNMENT: usize = mem::align_of::<T>();
    const T_SIZE: usize = mem::size_of::<T>();
    const GROWTH_RATE: usize = 2;

    pub fn new() -> Self {
        Self {
            base: ptr::null_mut(),
            cap: 0,
            len: 0,
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        // Could be done a bit more efficiently
        let mut result = Self::new();
        result.ensure_space(capacity)
            .expect("capacity overflow while reserving space");
        return result;
    }
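
    /// Returns a const pointer to the element at `idx`. The index is only
    /// checked in debug builds; the caller must make sure it is in bounds and
    /// refers to an initialized element.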
    pub unsafe fn get(&self, idx: usize) -> *const T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }
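
    /// Returns a mutable pointer to the element at `idx`. As with `get`, the
    /// caller is responsible for the index being in bounds and for preventing
    /// illegal aliased mutable access.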
    pub unsafe fn get_mut(&self, idx: usize) -> *mut T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }
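
    /// Appends an element, growing the backing storage if there is no room
    /// left. Panics on capacity overflow.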
    pub fn push(&mut self, item: T) {
        self.ensure_space(1)
            .expect("capacity overflow while pushing element");
        unsafe {
            let target = self.base.add(self.len);
            ptr::write(target, item);
            self.len += 1;
        }
    }

    pub fn len(&self) -> usize {
        return self.len;
    }
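
    /// Makes sure there is room for at least `additional` more elements,
    /// reallocating the backing storage (scaled by the growth rate) if there
    /// is not.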
    fn ensure_space(&mut self, additional: usize) -> Result<(), AllocError> {
        debug_assert!(Self::T_SIZE != 0);
        debug_assert!(self.cap >= self.len);

        if self.cap - self.len < additional {
            // Need to resize. Note that due to all checked conditions we have
            // that new_cap >= 1.
            debug_assert!(additional > 0);
            let new_cap = self.len.checked_add(additional)
                .ok_or(AllocError::CapacityOverflow)?;
            let new_cap = cmp::max(new_cap, self.cap * Self::GROWTH_RATE);
            let layout = Layout::array::<T>(new_cap)
                .map_err(|_| AllocError::CapacityOverflow)?;
            debug_assert_eq!(new_cap * Self::T_SIZE, layout.size());

            unsafe {
                // Allocate new storage, transfer the initialized elements,
                // then deallocate the old storage.
                let new_base = alloc(layout);
                if new_base.is_null() {
                    handle_alloc_error(layout);
                }
                if self.cap > 0 {
                    let old_base = self.base as *mut u8;
                    let (_old_size, old_layout) = self.current_layout();
                    ptr::copy_nonoverlapping(old_base, new_base, self.len * Self::T_SIZE);
                    dealloc(old_base, old_layout);
                }
                self.base = new_base as *mut T;
                self.cap = new_cap;
            }
        } // else: still enough space

        return Ok(());
    }
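
    /// Returns the size in bytes and the `Layout` of the current allocation.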
    #[inline]
    fn current_layout(&self) -> (usize, Layout) {
        debug_assert!(Self::T_SIZE > 0);
        let old_size = self.cap * Self::T_SIZE;
        unsafe {
            return (
                old_size,
                Layout::from_size_align_unchecked(old_size, Self::T_ALIGNMENT)
            );
        }
    }
}

impl<T: Sized> Drop for RawVec<T> {
    fn drop(&mut self) {
        if self.cap > 0 {
            debug_assert!(!self.base.is_null());
            let (_, layout) = self.current_layout();
            unsafe {
                dealloc(self.base as *mut u8, layout);
                if cfg!(debug_assertions) {
                    self.base = ptr::null_mut();
                }
            }
        }
    }
}
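
// A small usage sketch of the API above: it exercises push, len, get/get_mut
// and the reallocation path. Element destructors are never run by RawVec, so
// plain integers are used here.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn push_get_and_grow() {
        let mut v = RawVec::with_capacity(2);
        for idx in 0..16usize {
            // Pushing past the initial capacity forces at least one reallocation
            v.push(idx);
        }
        assert_eq!(v.len(), 16);
        unsafe {
            assert_eq!(*v.get(3), 3);
            *v.get_mut(3) = 42;
            assert_eq!(*v.get(3), 42);
        }
    }
}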