Changeset - 3da401203200
[Not reviewed]
0 1 0
Christopher Esterhuyse - 5 years ago 2020-02-14 17:47:52
christopher.esterhuyse@gmail.com
matrices seem very promising
1 file changed with 42 insertions and 4 deletions:
0 comments (0 inline, 0 general)
src/runtime/ecs.rs
Show inline comments
 
@@ -589,116 +589,153 @@ EXTREME approach:
 

	
 
==================
 
*/
 

	
 
impl Debug for FlagMatrix {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        for r in 0..self.dims[0] {
 
            write!(f, "|")?;
 
            for c in 0..self.dims[1] {
 
                write!(
 
                    f,
 
                    "{}",
 
                    match self.test([r, c]) {
 
                        false => '0',
 
                        true => '1',
 
                    }
 
                )?;
 
            }
 
            write!(f, "|\n")?;
 
        }
 
        Ok(())
 
    }
 
}
 

	
 
// Bit-matrix of boolean flags backed by a manually-managed `u32` buffer.
//
// Invariant: every bit that lies inside the allocation but outside the logical
// window (rows `0..dims[0]`, columns `0..dims[1]`) is ZERO. Row/column
// operations and reshaping rely on this so they never re-clear stale bits.
struct FlagMatrix {
    bytes: *mut u32,       // owned allocation; released in Drop via Self::layout_for(u32s_total)
    u32s_total: usize,     // total u32 chunks allocated (may include growth slack)
    u32s_per_row: usize,   // allocated u32 chunks per row (>= chunks needed for dims[1] columns)
    dims: [usize; 2],      // logical [rows, columns]
}
 
/// Rounds `value` up to the nearest multiple of 32.
#[inline(always)]
fn ceiling_to_mul_32(value: usize) -> usize {
    ((value + 31) / 32) * 32
}
 
impl Drop for FlagMatrix {
    fn drop(&mut self) {
        // Reconstruct the layout the buffer is currently allocated with, so
        // dealloc receives a layout matching the allocation (required by the
        // global allocator's contract).
        let layout = Self::layout_for(self.u32s_total);
        unsafe {
            // SAFETY: `bytes` was allocated with `layout_for(self.u32s_total)`
            // (reshape keeps `u32s_total` in sync whenever it reallocates) and
            // is never used again after this point.
            std::alloc::dealloc(self.bytes as *mut u8, layout);
        }
    }
}
 
impl FlagMatrix {
 
    /// Returns the logical dimensions as `[rows, columns]`.
    fn get_dims(&self) -> &[usize; 2] {
        &self.dims
    }
 

	
 
    fn set_entire_row(&mut self, row: usize) {
 
        assert!(row < self.dims[0]);
 
        let mut cols_left = self.dims[1];
 
        unsafe {
 
            let mut ptr = self.bytes.add(self.offset_of_chunk_unchecked([row, 0]));
 
            while cols_left >= 32 {
 
                *ptr = !0u32;
 
                cols_left -= 32;
 
                ptr = ptr.add(1);
 
            }
 
            if cols_left > 0 {
 
                // jagged chunk!
 
                *ptr |= (!0) >> (32 - cols_left);
 
            }
 
        }
 
    }
 
    fn unset_entire_row(&mut self, row: usize) {
 
        assert!(row < self.dims[0]);
 
        let mut cols_left = self.dims[1];
 
        unsafe {
 
            let mut ptr = self.bytes.add(self.offset_of_chunk_unchecked([row, 0]));
 
            while cols_left > 0 {
 
                *ptr = 0u32;
 
                cols_left -= 32;
 
                ptr = ptr.add(1);
 
            }
 
        }
 
    }
 

	
 
    fn reshape(&mut self, new_dims: [usize; 2]) {
 
        dbg!(self.u32s_total, self.u32s_per_row);
 

	
 
        // 1. calc new u32s_per_row
 
        let new_u32s_per_row = match ceiling_to_mul_32(new_dims[1]) / 32 {
 
            min if min > self.u32s_per_row => Some(min * 2),
 
            _ => None,
 
        };
 

	
 
        // 2. calc new u32s_total
 
        let new_u32s_total = match new_u32s_per_row.unwrap_or(self.u32s_per_row) * new_dims[0] {
 
            min if min > self.u32s_total => Some(min * 2),
 
            _ => None,
 
        };
 

	
 
        // 3. set any bits no longer in columns to zero
 
        let new_last_chunk_zero_prefix = new_dims[1] % 32;
 
        if new_dims[1] < self.dims[1] {
 
            let old_min_u32_per_row = ceiling_to_mul_32(new_dims[1]) / 32;
 
            let new_min_u32_per_row = ceiling_to_mul_32(self.dims[1]) / 32;
 
            let common_rows = self.dims[0].min(new_dims[0]);
 
            if old_min_u32_per_row < new_min_u32_per_row {
 
                // zero chunks made entirely of removed columns
 
                for row in 0..common_rows {
 
                    unsafe {
 
                        self.bytes
 
                            .add(self.offset_of_chunk_unchecked([row, old_min_u32_per_row]))
 
                            .write_bytes(0u8, new_min_u32_per_row - old_min_u32_per_row);
 
                    }
 
                }
 
            }
 
            if new_last_chunk_zero_prefix > 0 {
 
                // wipe out new_last_chunk_zero_prefix-most significant bits of all new last column chunks
 
                let mask: u32 = !0u32 >> new_last_chunk_zero_prefix;
 
                for row in 0..common_rows {
 
                    let o_of = self.offset_of_chunk_unchecked([row, new_min_u32_per_row - 1]);
 
                    unsafe { *self.bytes.add(o_of) &= mask };
 
                }
 
            }
 
        }
 

	
 
        // TODO what about rows?
 
        // 4. if we won't do a new allocation, zero any bit no longer in rows
 
        if new_dims[0] < self.dims[0] && new_u32s_total.is_none() {
 
            // zero all bytes from beginning of first removed row,
 
            // to end of last removed row
 
            unsafe {
 
                self.bytes
 
                    .add(self.offset_of_chunk_unchecked([new_dims[0], 0]))
 
                    .write_bytes(0u8, self.u32s_per_row * (self.dims[0] - new_dims[0]));
 
            }
 
        }
 

	
 
        dbg!(new_u32s_per_row, new_u32s_total);
 
        match [new_u32s_per_row, new_u32s_total] {
 
            [None, None] => { /* do nothing */ }
 
            [None, Some(new_u32s_total)] => {
 
                // realloc only! column alignment is still OK
 
                // assert!(new_u32s_total > self.u32s_total);
 
                let old_layout = Self::layout_for(self.u32s_total);
 
                let new_layout = Self::layout_for(new_u32s_total);
 
                let new_bytes = unsafe {
 
                    let new_bytes = std::alloc::alloc(new_layout) as *mut u32;
 
                    // copy the previous total
 
                    self.bytes.copy_to_nonoverlapping(new_bytes, self.u32s_total);
 
                    // and zero the remainder
 
                    new_bytes
 
                        .add(self.u32s_total)
 
                        .write_bytes(0u8, new_u32s_total - self.u32s_total);
 
                    // drop the previous buffer
 
                    std::alloc::dealloc(self.bytes as *mut u8, old_layout);
 
                    new_bytes
 
                };
 
                self.bytes = new_bytes;
 
                println!("AFTER {:?}", self.bytes);
 
                self.u32s_total = new_u32s_total;
 
@@ -818,37 +855,38 @@ impl FlagMatrix {
 
        assert!(t_row < self.dims[0]);
 
        for &f_row in f_rows.iter() {
 
            assert!(f_row < self.dims[0]);
 
        }
 

	
 
        // 2. construct an unsafe iterator over chunks
 
        // column_chunk_range ensures all col_chunk_index values are in range.
 
        let column_chunk_range = 0..ceiling_to_mul_32(self.dims[1]) / 32;
 
        let chunk_iter = column_chunk_range.map(move |col_chunk_index| {
 
            // SAFETY: all rows and columns have already been bounds-checked.
 
            let t_chunk = unsafe { self.copy_chunk_unchecked(t_row, col_chunk_index) };
 
            f_rows.iter().fold(t_chunk, |chunk, &f_row| {
 
                let f_chunk = unsafe { self.copy_chunk_unchecked(f_row, col_chunk_index) };
 
                chunk & !f_chunk
 
            })
 
        });
 

	
 
        // 3. yield columns indices from the chunk iterator
 
        BitChunkIter::new(chunk_iter)
 
    }
 
}
 

	
 
/// Smoke test: build a 6x6 matrix, set a diagonal plus row 0 and all of
/// row 5, grow the matrix to 6x40, then iterate the columns set in row 0
/// but in none of rows 1..=3.
///
/// Fix: removed a dead `FlagMatrix::new([5, 5], [0, 0])` binding that was
/// constructed and immediately shadowed (and dropped) by the `[6, 6]` one.
#[test]
fn matrix() {
    let mut m = FlagMatrix::new([6, 6], [0, 0]);
    for i in 0..5 {
        m.set([0, i]);
        m.set([i, i]);
    }
    m.set_entire_row(5);
    println!("{:?}", &m);
    m.reshape([6, 40]);
    let iter = m.col_iter_t1fn(0, &[1, 2, 3]);
    for c in iter {
        println!("{:?}", c);
    }
    println!("{:?}", &m);
}
0 comments (0 inline, 0 general)