// CSY/reowolf/src/collections/raw_vec.rs @ 1aef293674a6

use std::{mem, ptr, cmp};
use std::alloc::{Layout, alloc, dealloc, handle_alloc_error};

#[derive(Debug)]
enum AllocError {
    CapacityOverflow,
}

/// Generic raw vector. It has a base pointer, a capacity and a length. Basic
/// operations are supported, but the user of the structure is responsible for
/// ensuring that no illegal mutable access occurs.
/// A lot of the logic is simply stolen from the std lib. The destructor will
/// free the backing memory, but will not run the elements' destructors.
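///
/// A minimal usage sketch (hedged: because `drop` never runs element
/// destructors, this is only appropriate as-is for element types without a
/// meaningful `Drop`):
///
/// ```ignore
/// let mut v = RawVec::new();
/// v.push(1u32);
/// v.push(2u32);
/// unsafe {
///     assert_eq!(*v.get(0), 1);
///     *v.get_mut(1) = 3;
/// }
/// assert_eq!(v.len(), 2);
/// ```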
pub struct RawVec<T: Sized> {
    base: *mut T,
    cap: usize,
    len: usize,
}

impl<T: Sized> RawVec<T> {
    const T_ALIGNMENT: usize = mem::align_of::<T>();
    const T_SIZE: usize = mem::size_of::<T>();
    
    const GROWTH_RATE: usize = 2;

    pub fn new() -> Self {
        Self{
            base: ptr::null_mut(),
            cap: 0,
            len: 0,
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        // Could be done a bit more efficiently
        let mut result = Self::new();
        result.ensure_space(capacity).expect("capacity overflow");
        return result;
    }

    /// Returns a pointer to the element at `idx`. The caller must ensure the
    /// index is in bounds.
    pub unsafe fn get(&self, idx: usize) -> *const T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    /// Returns a mutable pointer to the element at `idx`. Takes `&self` on
    /// purpose: the caller is responsible for upholding the aliasing rules.
    pub unsafe fn get_mut(&self, idx: usize) -> *mut T {
        debug_assert!(idx < self.len);
        return self.base.add(idx);
    }

    pub fn push(&mut self, item: T) {
        self.ensure_space(1).expect("capacity overflow");
        unsafe {
            let target = self.base.add(self.len);
            ptr::write(target, item);
            self.len += 1;
        }
    }

    pub fn len(&self) -> usize {
        return self.len;
    }

    fn ensure_space(&mut self, additional: usize) -> Result<(), AllocError> {
        debug_assert!(Self::T_SIZE != 0);
        debug_assert!(self.cap >= self.len);
        if self.cap - self.len < additional {
            // Need to resize. Note that due to all checked conditions we have
            // that new_cap >= 1.
            debug_assert!(additional > 0);
            let new_cap = self.len.checked_add(additional)
                .ok_or(AllocError::CapacityOverflow)?;
            let new_cap = cmp::max(new_cap, self.cap.saturating_mul(Self::GROWTH_RATE));
            
            let layout = Layout::array::<T>(new_cap)
                .map_err(|_| AllocError::CapacityOverflow)?;
            debug_assert_eq!(new_cap * Self::T_SIZE, layout.size());

            unsafe {
                // Allocate new storage, transfer bits, deallocate old store
                let new_base = alloc(layout);
                if new_base.is_null() {
                    // Allocation failed: defer to the global handler
                    handle_alloc_error(layout);
                }

                if self.cap > 0 {
                    let old_base = self.base as *mut u8;
                    let (old_size, old_layout) = self.current_layout();

                    // Copy runs from the old allocation into the new one
                    // (arguments are: src, dst, count)
                    ptr::copy_nonoverlapping(old_base, new_base, old_size);
                    dealloc(old_base, old_layout);
                }

                self.base = new_base as *mut T;
                self.cap = new_cap;
            }
        } // else: still enough space

        return Ok(());
    }

    #[inline]
    fn current_layout(&self) -> (usize, Layout) {
        debug_assert!(Self::T_SIZE > 0);
        let old_size = self.cap * Self::T_SIZE;
        unsafe {
            return (
                old_size,
                Layout::from_size_align_unchecked(old_size, Self::T_ALIGNMENT)
            );
        }
    }
}

impl<T: Sized> Drop for RawVec<T> {
    fn drop(&mut self) {
        if self.cap > 0 {
            debug_assert!(!self.base.is_null());
            let (_, layout) = self.current_layout();
            unsafe {
                dealloc(self.base as *mut u8, layout);
                if cfg!(debug_assertions) {
                    self.base = ptr::null_mut();
                }
            }
        }
    }
}
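
// A hedged test sketch, added for illustration and not part of the original
// revision. It exercises push/get and the doubling growth path using `Copy`
// element types, since `drop` frees memory without running element
// destructors.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn push_get_and_grow() {
        let mut v = RawVec::new();
        // Push past several capacity doublings (1, 2, 4, 8, ...)
        for i in 0..100u32 {
            v.push(i);
        }
        assert_eq!(v.len(), 100);
        unsafe {
            assert_eq!(*v.get(0), 0);
            assert_eq!(*v.get(99), 99);
            *v.get_mut(5) = 42;
            assert_eq!(*v.get(5), 42);
        }
    }

    #[test]
    fn with_capacity_allows_pushes() {
        let mut v = RawVec::with_capacity(16);
        assert_eq!(v.len(), 0);
        v.push(1u64);
        assert_eq!(v.len(), 1);
    }
}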