use std::{mem, ptr, cmp};
use std::alloc::{Layout, alloc, dealloc};

#[derive(Debug)]
enum AllocError {
    CapacityOverflow,
}

/// Generic raw vector: a base pointer, a capacity and a length. Basic
/// operations are supported, but the user of the structure is responsible for
/// ensuring that no illegal mutable access occurs.
/// A lot of the logic is simply stolen from the std lib. The destructor will
/// free the backing memory, but will not run any element destructors.
/// Prefer the provided methods for modifying the length, but writing to `len`
/// directly is fine if you know what you're doing.
pub struct RawVec<T> {
    base: *mut T,
    cap: usize,
    pub len: usize,
}

impl<T> RawVec<T> {
    const T_ALIGNMENT: usize = mem::align_of::<T>();
    const T_SIZE: usize = mem::size_of::<T>();
    const GROWTH_RATE: usize = 2;

    pub fn new() -> Self {
        Self {
            base: ptr::null_mut(),
            cap: 0,
            len: 0,
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        // Could be done a bit more efficiently
        let mut result = Self::new();
        result.ensure_space(capacity).unwrap();
        return result;
    }

    #[inline]
    pub unsafe fn get(&self, idx: usize) -> *const T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    #[inline]
    pub unsafe fn get_mut(&self, idx: usize) -> *mut T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    /// Pushes a new element to the end of the list.
    pub fn push(&mut self, item: T) {
        self.ensure_space(1).unwrap();
        unsafe {
            let target = self.base.add(self.len);
            ptr::write(target, item);
            self.len += 1;
        }
    }

    /// Moves the elements in the range [from_idx, from_idx + num_to_move) to
    /// the range [to_idx, to_idx + num_to_move). The caller must make sure
    /// that all non-overlapping elements of the target range had their
    /// destructors called in case those elements were in use.
    pub fn move_range(&mut self, from_idx: usize, to_idx: usize, num_to_move: usize) {
        debug_assert!(from_idx + num_to_move <= self.len);
        debug_assert!(to_idx + num_to_move <= self.len); // maybe not in future, for now this is fine
        unsafe {
            let source = self.base.add(from_idx);
            let target = self.base.add(to_idx);
            // `copy` (as opposed to `copy_nonoverlapping`) handles
            // overlapping source and target ranges, like memmove.
            ptr::copy(source, target, num_to_move);
        }
    }

    pub fn len(&self) -> usize {
        return self.len;
    }

    pub fn as_slice(&self) -> &[T] {
        if self.len == 0 {
            // `from_raw_parts` requires a non-null, aligned pointer even for
            // empty slices, and `base` may still be null here.
            return &[];
        }
        return unsafe { std::slice::from_raw_parts(self.base, self.len) };
    }

    fn ensure_space(&mut self, additional: usize) -> Result<(), AllocError> {
        // Zero-sized types are not supported.
        debug_assert!(Self::T_SIZE != 0);
        debug_assert!(self.cap >= self.len);
        if self.cap - self.len < additional {
            // Need to resize. Note that due to all checked conditions we have
            // that new_cap >= 1.
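            // Growth strategy: satisfy the exact requirement `len + additional`,
            // but grow to at least GROWTH_RATE times the old capacity so that
            // repeated pushes stay amortized O(1).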
            debug_assert!(additional > 0);
            let new_cap = self.len.checked_add(additional).unwrap();
            let new_cap = cmp::max(new_cap, self.cap * Self::GROWTH_RATE);
            let layout = Layout::array::<T>(new_cap)
                .map_err(|_| AllocError::CapacityOverflow)?;
            debug_assert_eq!(new_cap * Self::T_SIZE, layout.size());
            unsafe {
                // Allocate new storage, transfer bits, deallocate old storage
                let new_base = alloc(layout);
                if new_base.is_null() {
                    // Out of memory: defer to the global handler instead of
                    // writing through a null pointer below.
                    std::alloc::handle_alloc_error(layout);
                }
                if self.cap > 0 {
                    let old_base = self.base as *mut u8;
                    let (old_size, old_layout) = self.current_layout();
                    ptr::copy_nonoverlapping(old_base, new_base, old_size);
                    dealloc(old_base, old_layout);
                }
                self.base = new_base as *mut T;
                self.cap = new_cap;
            }
        }
        // else: still enough space
        return Ok(());
    }

    #[inline]
    fn current_layout(&self) -> (usize, Layout) {
        debug_assert!(Self::T_SIZE > 0);
        let old_size = self.cap * Self::T_SIZE;
        unsafe {
            return (
                old_size,
                Layout::from_size_align_unchecked(old_size, Self::T_ALIGNMENT),
            );
        }
    }
}

impl<T> Drop for RawVec<T> {
    fn drop(&mut self) {
        if self.cap > 0 {
            debug_assert!(!self.base.is_null());
            let (_, layout) = self.current_layout();
            unsafe {
                dealloc(self.base as *mut u8, layout);
                // `dbg_code!` is a project-local macro; presumably it only
                // runs its body in debug builds, poisoning the freed pointer.
                dbg_code!({ self.base = ptr::null_mut(); });
            }
        }
    }
}
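
// A minimal usage sketch, not part of the original file: it exercises push,
// as_slice and move_range through the public API only. Assumes the crate
// defines the `dbg_code!` macro used in Drop above, so that this module
// compiles in the context of the full project.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn push_grows_and_reads_back() {
        let mut v: RawVec<u32> = RawVec::with_capacity(2);
        v.push(1);
        v.push(2);
        v.push(3); // forces a reallocation past the initial capacity
        assert_eq!(v.as_slice(), &[1, 2, 3]);
        assert_eq!(v.len(), 3);
    }

    #[test]
    fn move_range_shifts_elements() {
        let mut v: RawVec<u32> = RawVec::new();
        for i in 0..4 {
            v.push(i);
        }
        // Overwrite [0, 2) with the elements from [2, 4). u32 is Copy, so no
        // destructors need to run on the overwritten range.
        v.move_range(2, 0, 2);
        assert_eq!(v.as_slice(), &[2, 3, 2, 3]);
    }
}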