Files
@ 40fcfdd81ceb
Branch filter:
Location: CSY/reowolf/src/collections/raw_vec.rs
40fcfdd81ceb
3.9 KiB
application/rls-services+xml
cleaned up dependencies
use std::{mem, ptr, cmp};
use std::alloc::{Layout, alloc, dealloc};
/// Errors reported by the allocation paths of [`RawVec`].
#[derive(Debug)]
enum AllocError {
// The requested capacity cannot be represented as a valid `Layout`
// (i.e. the byte size would overflow).
CapacityOverflow,
}
/// Generic raw vector. It has a base pointer, a capacity and a length. Basic
/// operations are supported, but the user of the structure is responsible for
/// ensuring that no illegal mutable access occurs.
/// A lot of the logic is simply stolen from the std lib. The destructor will
/// free the backing memory, but will not run any destructors.
/// Prefer the provided methods when modifying the length, but feel free to
/// manipulate the `len` field directly if you know what you're doing.
pub struct RawVec<T: Sized> {
base: *mut T, // pointer to the backing allocation; null while `cap == 0`
cap: usize, // number of elements the allocation can hold
pub len: usize, // number of initialized elements; deliberately public (see type docs)
}
impl<T: Sized> RawVec<T> {
    /// Alignment of `T`, used when reconstructing the current layout.
    const T_ALIGNMENT: usize = mem::align_of::<T>();
    /// Size of `T` in bytes. Zero-sized types are not supported (asserted
    /// in `ensure_space`).
    const T_SIZE: usize = mem::size_of::<T>();
    /// Capacity multiplier applied when the vector has to grow.
    const GROWTH_RATE: usize = 2;

    /// Creates an empty vector without allocating.
    pub fn new() -> Self {
        Self{
            base: ptr::null_mut(),
            cap: 0,
            len: 0,
        }
    }

    /// Creates an empty vector whose backing storage can hold at least
    /// `capacity` elements before the first reallocation.
    pub fn with_capacity(capacity: usize) -> Self {
        // Could be done a bit more efficiently
        let mut result = Self::new();
        result.ensure_space(capacity).unwrap();
        return result;
    }

    /// Returns a const pointer to the element at `idx`.
    ///
    /// # Safety
    /// `idx` must be in bounds (`idx < self.len`, only checked in debug
    /// builds) and the element must have been initialized by the caller.
    #[inline]
    pub unsafe fn get(&self, idx: usize) -> *const T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    /// Returns a mutable pointer to the element at `idx`. Note this takes
    /// `&self`: the caller is responsible for preventing illegal aliasing
    /// (see the type-level docs).
    ///
    /// # Safety
    /// Same requirements as [`RawVec::get`].
    #[inline]
    pub unsafe fn get_mut(&self, idx: usize) -> *mut T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    /// Pushes a new element to the end of the list, growing the backing
    /// storage when full.
    pub fn push(&mut self, item: T) {
        self.ensure_space(1).unwrap();
        unsafe {
            // SAFETY: `ensure_space` guarantees `cap > len`, so the write
            // target lies within the (freshly) allocated region.
            let target = self.base.add(self.len);
            std::ptr::write(target, item);
            self.len += 1;
        }
    }

    /// Number of initialized elements.
    pub fn len(&self) -> usize {
        return self.len;
    }

    /// Views the initialized prefix as a slice.
    pub fn as_slice(&self) -> &[T] {
        if self.len == 0 {
            // `base` may still be null here; `from_raw_parts` requires a
            // non-null, aligned pointer even for empty slices, so return a
            // static empty slice instead of invoking UB.
            return &[];
        }
        return unsafe{
            // SAFETY: `base` is non-null once `len > 0` and points to `len`
            // initialized elements.
            std::slice::from_raw_parts(self.base, self.len)
        };
    }

    /// Ensures there is room for `additional` more elements beyond `len`,
    /// reallocating (and copying the existing bytes) if necessary.
    ///
    /// # Errors
    /// Returns `AllocError::CapacityOverflow` when the required capacity
    /// cannot be represented as a valid `Layout`. Aborts the process (via
    /// `handle_alloc_error`) if the allocator itself fails.
    fn ensure_space(&mut self, additional: usize) -> Result<(), AllocError>{
        debug_assert!(Self::T_SIZE != 0); // ZSTs are not supported
        debug_assert!(self.cap >= self.len);
        if self.cap - self.len < additional {
            // Need to resize. Note that due to all checked conditions we have
            // that new_cap >= 1.
            debug_assert!(additional > 0);
            let new_cap = self.len.checked_add(additional)
                .ok_or(AllocError::CapacityOverflow)?;
            // Grow at least geometrically; saturating is fine because
            // `Layout::array` rejects any capacity whose byte size overflows.
            let new_cap = cmp::max(new_cap, self.cap.saturating_mul(Self::GROWTH_RATE));
            let layout = Layout::array::<T>(new_cap)
                .map_err(|_| AllocError::CapacityOverflow)?;
            debug_assert_eq!(new_cap * Self::T_SIZE, layout.size());
            unsafe {
                // Allocate new storage, transfer bits, deallocate old store
                let new_base = alloc(layout);
                if new_base.is_null() {
                    // Never hand out a null base pointer: follow the global
                    // allocator's contract for failed allocations.
                    std::alloc::handle_alloc_error(layout);
                }
                if self.cap > 0 {
                    let old_base = self.base as *mut u8;
                    let (old_size, old_layout) = self.current_layout();
                    // SAFETY: both regions are distinct allocations and the
                    // new one is at least `old_size` bytes large.
                    ptr::copy_nonoverlapping(old_base, new_base, old_size);
                    dealloc(old_base, old_layout);
                }
                self.base = new_base as *mut T;
                self.cap = new_cap;
            }
        } // else: still enough space
        return Ok(());
    }

    /// Returns the byte size and `Layout` of the current allocation.
    /// Only meaningful while `cap > 0`.
    #[inline]
    fn current_layout(&self) -> (usize, Layout) {
        debug_assert!(Self::T_SIZE > 0);
        let old_size = self.cap * Self::T_SIZE;
        unsafe {
            // SAFETY: size/alignment come from a layout that was already
            // validated by `Layout::array` when the allocation was made.
            return (
                old_size,
                Layout::from_size_align_unchecked(old_size, Self::T_ALIGNMENT)
            );
        }
    }
}
impl<T: Sized> Drop for RawVec<T> {
// Frees only the backing allocation; element destructors are intentionally
// NOT run (see the type-level docs).
fn drop(&mut self) {
if self.cap > 0 {
debug_assert!(!self.base.is_null());
let (_, layout) = self.current_layout();
unsafe {
dealloc(self.base as *mut u8, layout);
// NOTE(review): `dbg_code!` is not defined in this file — presumably a
// crate-local macro that poisons the pointer in debug builds only;
// confirm it expands to nothing in release builds.
dbg_code!({ self.base = ptr::null_mut(); });
}
}
}
}
|