Changeset - cc0baf0da727
[Not reviewed]
Christopher Esterhuyse - 5 years ago 2020-02-15 14:30:07
christopher.esterhuyse@gmail.com
properties as rows works much better
3 files changed with 269 insertions and 2 deletions:
0 comments (0 inline, 0 general)
src/runtime/bits.rs
 
new file 100644
 
use crate::common::*;
 

	
 
/// Converts an iterator over contiguous u32 chunks into an iterator over usize
 
/// e.g. input [0b111000, 0b11] gives output [3, 4, 5, 32, 33]
 
/// observe that bits within a chunk are ordered from least to most significant, yielding smaller to larger usizes.
 
/// works by draining the inner u32 chunk iterator one u32 at a time, then draining that chunk until it's 0.
 
struct BitChunkIter<I: Iterator<Item = u32>> {
 
    chunk_iter: I,
 
    next_bit_index: usize,
 
    cached: u32,
 
}
 

	
 
impl<I: Iterator<Item = u32>> BitChunkIter<I> {
 
    fn new(chunk_iter: I) -> Self {
 
        // We start with an exhausted dummy chunk (cached == 0), as if chunk_iter had
        // already yielded Some(0). Consequences:
        // 1. next_bit_index is always off by 32 relative to the real bit index
        //    (we correct for it in Self::next) (no additional overhead)
        // 2. we cache a u32 and not an Option<u32>, because chunk_iter.next() is only
        //    called in Self::next.
        // Invariant: whenever cached == 0, next_bit_index is the (off-by-32) index of
        // bit 0 of the next chunk to be fetched, so it starts at 32.
        Self { chunk_iter, next_bit_index: 32, cached: 0 }
 
    }
 
}
 
impl<I: Iterator<Item = u32>> Iterator for BitChunkIter<I> {
 
    type Item = usize;
 
    fn next(&mut self) -> Option<Self::Item> {
 
        let mut chunk = self.cached;
 

	
 
        // loop until either:
 
        // 1. there are no more Items to return, or
 
        // 2. chunk encodes 1+ Items, one of which we will return.
 
        while chunk == 0 {
 
            // chunk is still empty! get the next one...
 
            chunk = self.chunk_iter.next()?;
 

	
 
            // self.next_bit_index already indexes bit 0 of this chunk (off by 32).
            // If the fetched chunk is entirely zero, skip past its 32 bits and retry.
            if chunk == 0 {
                self.next_bit_index += 32;
            }
 
        }
 
        // assert(chunk > 0);
 

	
 
        // Shift the contents of chunk until the least significant bit is 1.
 
        // ... being sure to increment next_bit_index accordingly.
 
        #[inline(always)]
 
        fn skip_n_zeroes(chunk: &mut u32, n: usize, next_bit_index: &mut usize) {
 
            if *chunk & ((1 << n) - 1) == 0 {
 
                // n least significant bits are zero. skip n bits.
 
                *next_bit_index += n;
 
                *chunk >>= n;
 
            }
 
        }
 
        skip_n_zeroes(&mut chunk, 16, &mut self.next_bit_index);
 
        skip_n_zeroes(&mut chunk, 08, &mut self.next_bit_index);
 
        skip_n_zeroes(&mut chunk, 04, &mut self.next_bit_index);
 
        skip_n_zeroes(&mut chunk, 02, &mut self.next_bit_index);
 
        skip_n_zeroes(&mut chunk, 01, &mut self.next_bit_index);
 
        // least significant bit of chunk is 1.
 
        // assert(chunk & 1 == 1)
 

	
 
        // The returned index is 32 smaller than self.next_bit_index because of the
        // off-by-32 encoding that lets us cache a u32 rather than an Option<u32>.
        let index = self.next_bit_index - 32;

        // Prepare our state for the next time Self::next is called:
        // retain the shifted remainder of the chunk, and jump over the bit
        // whose index we are about to return.
        self.cached = chunk >> 1;
        if self.cached == 0 {
            // Chunk exhausted: point next_bit_index at bit 0 (off by 32) of the NEXT
            // chunk, so the fetch loop above never skips a chunk (this matters when
            // the returned bit was bit 31 of its chunk).
            self.next_bit_index = (index & !(32 - 1)) + 64;
        } else {
            self.next_bit_index += 1;
        }
        Some(index)
 
    }
 
}
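// A small sanity test for the doc-comment example above: chunks [0b111000, 0b11]
// yield the set-bit indices [3, 4, 5, 32, 33]. The second case exercises a chunk
// whose most significant bit is set, where the index bookkeeping is easiest to break.
#[test]
fn bit_chunk_iter_test() {
    let indices: Vec<usize> = BitChunkIter::new([0b111000u32, 0b11].iter().copied()).collect();
    assert_eq!(indices, vec![3, 4, 5, 32, 33]);
    let indices: Vec<usize> = BitChunkIter::new([0x8000_0000u32, 0b1].iter().copied()).collect();
    assert_eq!(indices, vec![31, 32]);
}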
 

	
 
/*  --properties-->
 
     ___ ___ ___ ___
 
    |___|___|___|___|
 
  | |___|___|___|___|
 
  | |___|___|___|___|
 
  | |___|___|___|___|
 
  |
 
  V
 
 entity chunks (groups of 32)
 
*/
 
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
 
struct Pair {
 
    entity: u32,
 
    property: u32,
 
}
 
impl From<[u32; 2]> for Pair {
 
    fn from([entity, property]: [u32; 2]) -> Self {
 
        Pair { entity, property }
 
    }
 
}
 
struct BitMatrix {
 
    bounds: Pair,
 
    buffer: *mut u32,
 
}
 
impl Drop for BitMatrix {
 
    fn drop(&mut self) {
 
        let total_chunks = Self::row_chunks(self.bounds.property) as usize
 
            * Self::column_chunks(self.bounds.entity) as usize;
 
        let layout = Self::layout_for(total_chunks);
 
        unsafe {
 
            // SAFETY: self.buffer was allocated in Self::new with exactly this layout.
 
            std::alloc::dealloc(self.buffer as *mut u8, layout);
 
        }
 
    }
 
}
 
impl Debug for BitMatrix {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        let row_chunks = Self::row_chunks(self.bounds.property) as usize;
 
        let column_chunks = Self::column_chunks(self.bounds.entity) as usize;
 
        for property in 0..row_chunks {
 
            for entity_chunk in 0..column_chunks {
 
                write!(f, "|")?;
 
                let mut chunk = unsafe { *self.buffer.add(row_chunks * entity_chunk + property) };
 
                // the final entity chunk may be jagged; print only the in-bounds bits
                let end = if entity_chunk + 1 == column_chunks && self.bounds.entity % 32 != 0 {
                    self.bounds.entity % 32
                } else {
                    32
                };
 
                for _ in 0..end {
 
                    let c = match chunk & 1 {
 
                        0 => '0',
 
                        _ => '1',
 
                    };
 
                    write!(f, "{}", c)?;
 
                    chunk >>= 1;
 
                }
 
            }
 
            write!(f, "|\n")?;
 
        }
 
        Ok(())
 
    }
 
}
 
impl BitMatrix {
 
    #[inline]
 
    fn ceiling_to_mul_32(value: u32) -> u32 {
 
        (value + 31) & !31
 
    }
 
    #[inline]
 
    fn row_of(entity: u32) -> u32 {
 
        entity / 32
 
    }
 
    #[inline]
 
    fn row_chunks(property_bound: u32) -> u32 {
 
        property_bound
 
    }
 
    #[inline]
 
    fn column_chunks(entity_bound: u32) -> u32 {
 
        Self::ceiling_to_mul_32(entity_bound) / 32
 
    }
 
    #[inline]
 
    fn offsets_unchecked(&self, at: Pair) -> [usize; 2] {
 
        let o_in = at.entity as usize % 32;
 
        let row = Self::row_of(at.entity);
 
        let row_chunks = self.bounds.property;
 
        let o_of = row as usize * row_chunks as usize + at.property as usize;
 
        [o_of, o_in]
 
    }
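    // Worked example of the layout above: with bounds { entity: 50, property: 3 },
    // the buffer holds column_chunks(50) = 2 groups of row_chunks(3) = 3 u32s.
    // The flag for (entity 40, property 2) lives in the chunk at index
    // row_of(40) * 3 + 2 = 1 * 3 + 2 = 5, at bit 40 % 32 = 8 within that chunk.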
 
    // Returns a u32 of the form 000...000111...111 masking the last JAGGED chunk,
    // given the entity bound (column size).
    // If the last chunk is not jagged (i.e. entity_bound % 32 == 0), None is returned.
    // Otherwise, Some(mask) is returned such that `chunk & mask` zeroes exactly
    // the bits NOT in 0..entity_bound.
    fn last_row_chunk_mask(entity_bound: u32) -> Option<u32> {
        let valid_bits = entity_bound % 32;
        if valid_bits == 0 {
            None
        } else {
            // keep the low `valid_bits` bits; zero the (32 - valid_bits)-bit prefix
            Some(!0u32 >> (32 - valid_bits))
        }
    }
 
    fn assert_within_bounds(&self, at: Pair) {
 
        assert!(at.entity < self.bounds.entity);
 
        assert!(at.property < self.bounds.property);
 
    }
 
    /////////
 

	
 
    fn reshape(&mut self, dims: [usize; 2]) {
 
        todo!()
 
    }
 

	
 
    fn new(bounds: Pair) -> Self {
 
        let total_chunks = Self::row_chunks(bounds.property) as usize
 
            * Self::column_chunks(bounds.entity) as usize;
 
        let layout = Self::layout_for(total_chunks);
 
        let buffer;
 
        unsafe {
 
            buffer = std::alloc::alloc(layout) as *mut u32;
 
            buffer.write_bytes(0u8, total_chunks);
 
        };
 
        Self { buffer, bounds }
 
    }
 
    fn set(&mut self, at: Pair) {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { *self.buffer.add(o_of) |= 1 << o_in };
 
    }
 
    fn unset(&mut self, at: Pair) {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { *self.buffer.add(o_of) &= !(1 << o_in) };
 
    }
 
    fn test(&self, at: Pair) -> bool {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { (*self.buffer.add(o_of) & 1 << o_in) != 0 }
 
    }
 

	
 
    fn iter<'a, 'b>(
 
        &'a self,
 
        buf: &'b mut Vec<u32>,
 
        mut fold_fn: impl FnMut(&'b [u32]) -> u32,
 
    ) -> impl Iterator<Item = usize> + 'b {
 
        let buf_start = buf.len();
 
        let row_chunks = Self::row_chunks(self.bounds.property) as usize;
 
        let column_chunks = Self::column_chunks(self.bounds.entity);
 
        let mut ptr = self.buffer;
 
        for _row in 0..column_chunks {
 
            let slice;
 
            unsafe {
 
                slice = std::slice::from_raw_parts(ptr, row_chunks);
 
                ptr = ptr.add(row_chunks);
 
            }
 
            buf.push(fold_fn(slice));
 
        }
 
        if let Some(mask) = Self::last_row_chunk_mask(self.bounds.entity) {
 
            *buf.last_mut().unwrap() &= mask;
 
        }
 
        BitChunkIter::new(buf.drain(buf_start..))
 
    }
 

	
 
    fn layout_for(total_chunks: usize) -> std::alloc::Layout {
 
        unsafe {
 
            // this layout is ALWAYS valid:
 
            // 1. size is always nonzero
 
            // 2. size is always a multiple of 4 and 4-aligned
 
            std::alloc::Layout::from_size_align_unchecked(4 * total_chunks.max(1), 4)
 
        }
 
    }
 
}
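// Minimal check of last_row_chunk_mask: an entity bound of 50 leaves 50 % 32 = 18
// valid bits in the final (jagged) entity chunk, so the mask keeps exactly the low
// 18 bits; a bound that is a multiple of 32 needs no mask at all.
#[test]
fn last_chunk_mask_test() {
    assert_eq!(BitMatrix::last_row_chunk_mask(50), Some((1u32 << 18) - 1));
    assert_eq!(BitMatrix::last_row_chunk_mask(64), None);
}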
 

	
 
#[test]
 
fn matrix_test() {
 
    let mut m = BitMatrix::new(Pair { entity: 50, property: 3 });
 
    m.set([2, 0].into());
 
    m.set([40, 1].into());
 
    m.set([40, 2].into());
 
    m.set([40, 0].into());
 
    println!("{:?}", &m);
 

	
 
    let mut buf = vec![];
 
    for index in m.iter(&mut buf, move |slice| slice[0] ^ slice[1] ^ slice[2]) {
 
        println!("index {}", index);
 
    }
 
}
src/runtime/ecs.rs
 
@@ -685,208 +685,222 @@ impl FlagMatrix {
 
            let old_min_u32_per_row = ceiling_to_mul_32(new_dims[1]) / 32;
 
            let new_min_u32_per_row = ceiling_to_mul_32(self.dims[1]) / 32;
 
            let common_rows = self.dims[0].min(new_dims[0]);
 
            if old_min_u32_per_row < new_min_u32_per_row {
 
                // zero chunks made entirely of removed columns
 
                for row in 0..common_rows {
 
                    unsafe {
 
                        self.bytes
 
                            .add(self.offset_of_chunk_unchecked([row, old_min_u32_per_row]))
 
                            .write_bytes(0u8, new_min_u32_per_row - old_min_u32_per_row);
 
                    }
 
                }
 
            }
 
            if new_last_chunk_zero_prefix > 0 {
 
                // wipe out the `new_last_chunk_zero_prefix` most-significant bits of each row's last column chunk
 
                let mask: u32 = !0u32 >> new_last_chunk_zero_prefix;
 
                for row in 0..common_rows {
 
                    let o_of = self.offset_of_chunk_unchecked([row, new_min_u32_per_row - 1]);
 
                    unsafe { *self.bytes.add(o_of) &= mask };
 
                }
 
            }
 
        }
 

	
 
        // 4. if we won't do a new allocation, zero any bit no longer in rows
 
        if new_dims[0] < self.dims[0] && new_u32s_total.is_none() {
 
            // zero all bytes from beginning of first removed row,
 
            // to end of last removed row
 
            unsafe {
 
                self.bytes
 
                    .add(self.offset_of_chunk_unchecked([new_dims[0], 0]))
 
                    .write_bytes(0u8, self.u32s_per_row * (self.dims[0] - new_dims[0]));
 
            }
 
        }
 

	
 
        dbg!(new_u32s_per_row, new_u32s_total);
 
        match [new_u32s_per_row, new_u32s_total] {
 
            [None, None] => { /* do nothing */ }
 
            [None, Some(new_u32s_total)] => {
 
                // realloc only! column alignment is still OK
 
                // assert!(new_u32s_total > self.u32s_total);
 
                let old_layout = Self::layout_for(self.u32s_total);
 
                let new_layout = Self::layout_for(new_u32s_total);
 
                let new_bytes = unsafe {
 
                    let new_bytes = std::alloc::alloc(new_layout) as *mut u32;
 
                    // copy the previous total
 
                    self.bytes.copy_to_nonoverlapping(new_bytes, self.u32s_total);
 
                    // and zero the remainder
 
                    new_bytes
 
                        .add(self.u32s_total)
 
                        .write_bytes(0u8, new_u32s_total - self.u32s_total);
 
                    // drop the previous buffer
 
                    std::alloc::dealloc(self.bytes as *mut u8, old_layout);
 
                    new_bytes
 
                };
 
                self.bytes = new_bytes;
 
                println!("AFTER {:?}", self.bytes);
 
                self.u32s_total = new_u32s_total;
 
            }
 
            [Some(new_u32s_per_row), None] => {
 
                // shift only!
 
                // assert!(new_u32s_per_row > self.u32s_per_row);
 
                for r in (0..self.dims[0]).rev() {
 
                    // iterate in REVERSE order because new row[n] may overwrite old row[n+m]
 
                    unsafe {
 
                        let src = self.bytes.add(r * self.u32s_per_row);
 
                        let dest = self.bytes.add(r * new_u32s_per_row);
 
                        // copy the used prefix
 
                        src.copy_to(dest, self.u32s_per_row);
 
                        // and zero the remainder
 
                        dest.add(self.u32s_per_row)
 
                            .write_bytes(0u8, new_u32s_per_row - self.u32s_per_row);
 
                    }
 
                }
 
                self.u32s_per_row = new_u32s_per_row;
 
            }
 
            [Some(new_u32s_per_row), Some(new_u32s_total)] => {
 
                // alloc AND shift!
 
                // assert!(new_u32s_total > self.u32s_total);
 
                // assert!(new_u32s_per_row > self.u32s_per_row);
 
                let old_layout = Self::layout_for(self.u32s_total);
 
                let new_layout = Self::layout_for(new_u32s_total);
 
                let new_bytes = unsafe { std::alloc::alloc(new_layout) as *mut u32 };
 
                for r in 0..self.dims[0] {
 
                    // iterate forwards over rows!
 
                    unsafe {
 
                        let src = self.bytes.add(r * self.u32s_per_row);
 
                        let dest = new_bytes.add(r * new_u32s_per_row);
 
                        // copy the used prefix
 
                        src.copy_to_nonoverlapping(dest, self.u32s_per_row);
 
                        // and zero the remainder
 
                        dest.add(self.u32s_per_row)
 
                            .write_bytes(0u8, new_u32s_per_row - self.u32s_per_row);
 
                    }
 
                }
 
                let fresh_rows_at = self.dims[0] * new_u32s_per_row;
 
                unsafe {
 
                    new_bytes.add(fresh_rows_at).write_bytes(0u8, new_u32s_total - fresh_rows_at);
 
                }
 
                unsafe { std::alloc::dealloc(self.bytes as *mut u8, old_layout) };
 
                self.u32s_per_row = new_u32s_per_row;
 
                self.bytes = new_bytes;
 
                self.u32s_total = new_u32s_total;
 
            }
 
        }
 
        self.dims = new_dims;
 
    }
 

	
 
    fn layout_for(u32s_total: usize) -> std::alloc::Layout {
 
        unsafe {
 
            // this layout is ALWAYS valid:
 
            // 1. size is always nonzero
 
            // 2. size is always a multiple of 4 and 4-aligned
 
            std::alloc::Layout::from_size_align_unchecked(4 * u32s_total.max(1), 4)
 
        }
 
    }
 
    fn new(dims: [usize; 2], extra_dim_space: [usize; 2]) -> Self {
 
        let u32s_per_row = ceiling_to_mul_32(dims[1] + extra_dim_space[1]) / 32;
 
        let u32s_total = u32s_per_row * (dims[0] + extra_dim_space[0]);
 
        let layout = Self::layout_for(u32s_total);
 
        let bytes = unsafe {
 
            // allocate
 
            let bytes = std::alloc::alloc(layout) as *mut u32;
 
            // and zero
 
            bytes.write_bytes(0u8, u32s_total);
 
            bytes
 
        };
 
        Self { bytes, u32s_total, u32s_per_row, dims }
 
    }
 
    fn assert_within_bounds(&self, at: [usize; 2]) {
 
        assert!(at[0] < self.dims[0]);
 
        assert!(at[1] < self.dims[1]);
 
    }
 
    #[inline(always)]
 
    fn offset_of_chunk_unchecked(&self, at: [usize; 2]) -> usize {
 
        (self.u32s_per_row * at[0]) + at[1] / 32
 
    }
 
    #[inline(always)]
 
    fn offsets_unchecked(&self, at: [usize; 2]) -> [usize; 2] {
 
        let of_chunk = self.offset_of_chunk_unchecked(at);
 
        let in_chunk = at[1] % 32;
 
        [of_chunk, in_chunk]
 
    }
 
    fn set(&mut self, at: [usize; 2]) {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { *self.bytes.add(o_of) |= 1 << o_in };
 
    }
 
    fn unset(&mut self, at: [usize; 2]) {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { *self.bytes.add(o_of) &= !(1 << o_in) };
 
    }
 
    fn test(&self, at: [usize; 2]) -> bool {
 
        self.assert_within_bounds(at);
 
        let [o_of, o_in] = self.offsets_unchecked(at);
 
        unsafe { *self.bytes.add(o_of) & (1 << o_in) != 0 }
 
    }
 
    unsafe fn copy_chunk_unchecked(&self, row: usize, col_chunk_index: usize) -> u32 {
 
        let o_of = (self.u32s_per_row * row) + col_chunk_index;
 
        *self.bytes.add(o_of)
 
    }
 

	
 
    /// Returns an efficient iterator over the column indices c in the range 0..self.dims[1]
 
    /// where self.test([t_row, c]) && f_rows.iter().all(|&f_row| !self.test([f_row, c]))
 
    fn col_iter_t1fn<'a, 'b: 'a>(
 
        &'a self,
 
        t_row: usize,
 
        f_rows: &'b [usize],
 
    ) -> impl Iterator<Item = usize> + 'a {
 
        // 1. ensure all ROW indices are in range.
 
        assert!(t_row < self.dims[0]);
 
        for &f_row in f_rows.iter() {
 
            assert!(f_row < self.dims[0]);
 
        }
 

	
 
        // 2. construct an unsafe iterator over chunks
 
        // column_chunk_range ensures all col_chunk_index values are in range.
 
        let column_chunk_range = 0..ceiling_to_mul_32(self.dims[1]) / 32;
 
        let chunk_iter = column_chunk_range.map(move |col_chunk_index| {
 
            // SAFETY: all rows and columns have already been bounds-checked.
 
            let t_chunk = unsafe { self.copy_chunk_unchecked(t_row, col_chunk_index) };
 
            f_rows.iter().fold(t_chunk, |chunk, &f_row| {
 
                let f_chunk = unsafe { self.copy_chunk_unchecked(f_row, col_chunk_index) };
 
                chunk & !f_chunk
 
            })
 
        });
 

	
 
        // 3. yield column indices from the chunk iterator
 
        BitChunkIter::new(chunk_iter)
 
    }
 
}
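// A minimal sketch test of col_iter_t1fn's contract, using only the FlagMatrix API
// shown above (new/set). Column 20 is set in row 0 but also in row 1, so only
// columns 5 and 35 survive the "true in t_row, false in all f_rows" filter.
#[test]
fn matrix_col_iter_t1fn() {
    let mut m = FlagMatrix::new([3, 40], [0, 0]);
    m.set([0, 5]);
    m.set([0, 20]);
    m.set([0, 35]);
    m.set([1, 20]);
    let cols: Vec<usize> = m.col_iter_t1fn(0, &[1, 2]).collect();
    assert_eq!(cols, vec![5, 35]);
}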
 

	
 
// trait RwMatrixBits {
 
//     fn set(&mut self, at: [usize;2]);
 
//     fn unset(&mut self, at: [usize;2]);
 
//     fn set_entire_row(&mut self, row: usize);
 
//     fn unset_entire_row(&mut self, row: usize);
 
// }
 

	
 
// struct MatrixRefW<'a> {
 
//     _inner: usize,
 
// }
 
// impl<'a> MatrixRefW<'a> {
 

	
 
// }
 

	
 
#[test]
 
fn matrix() {
 
    let mut m = FlagMatrix::new([6, 6], [0, 0]);
 
    for i in 0..5 {
 
        m.set([0, i]);
 
        m.set([i, i]);
 
    }
 
    m.set_entire_row(5);
 
    println!("{:?}", &m);
 
    m.reshape([6, 40]);
 
    let iter = m.col_iter_t1fn(0, &[1, 2, 3]);
 
    for c in iter {
 
        println!("{:?}", c);
 
    }
 
    println!("{:?}", &m);
 
}
src/runtime/mod.rs
 
#[cfg(feature = "ffi")]
 
pub mod ffi;
 

	
 
// EXPERIMENTAL:
 
// mod predicate; // TODO later
 
// mod ecs;
 
mod bits;
 

	
 
mod actors;
 
pub(crate) mod communication;
 
pub(crate) mod connector;
 
pub(crate) mod endpoint;
 
pub mod errors;
 
// mod predicate; // TODO later
 
mod ecs;
 
mod serde;
 
pub(crate) mod setup;
 

	
 
pub(crate) type ProtocolD = crate::protocol::ProtocolDescriptionImpl;
 
pub(crate) type ProtocolS = crate::protocol::ComponentStateImpl;
 

	
 
use crate::common::*;
 
use actors::*;
 
use endpoint::*;
 
use errors::*;
 

	
 
#[derive(Debug, PartialEq)]
 
pub(crate) enum CommonSatResult {
 
    FormerNotLatter,
 
    LatterNotFormer,
 
    Equivalent,
 
    New(Predicate),
 
    Nonexistant,
 
}
 

	
 
#[derive(Clone, Eq, PartialEq, Hash)]
 
pub(crate) struct Predicate {
 
    pub assigned: BTreeMap<ChannelId, bool>,
 
}
 

	
 
#[derive(Debug, Default)]
 
struct SyncBatch {
 
    puts: HashMap<Key, Payload>,
 
    gets: HashSet<Key>,
 
}
 

	
 
#[derive(Debug)]
 
pub enum Connector {
 
    Unconfigured(Unconfigured),
 
    Configured(Configured),
 
    Connected(Connected), // TODO consider boxing. currently takes up a lot of stack real estate
 
}
 
#[derive(Debug)]
 
pub struct Unconfigured {
 
    pub controller_id: ControllerId,
 
}
 
#[derive(Debug)]
 
pub struct Configured {
 
    controller_id: ControllerId,
 
    polarities: Vec<Polarity>,
 
    bindings: HashMap<usize, PortBinding>,
 
    protocol_description: Arc<ProtocolD>,
 
    main_component: Vec<u8>,
 
}
 
#[derive(Debug)]
 
pub struct Connected {
 
    native_interface: Vec<(Key, Polarity)>,
 
    sync_batches: Vec<SyncBatch>,
 
    controller: Controller,
 
}
 

	
 
#[derive(Debug, Copy, Clone)]
 
pub enum PortBinding {
 
    Native,
 
    Active(SocketAddr),
 
    Passive(SocketAddr),
 
}
 

	
 
#[derive(Debug)]
 
struct Arena<T> {
 
    storage: Vec<T>,
 
}
 

	
 
#[derive(Debug)]
 
struct ReceivedMsg {
 
    recipient: Key,
 
    msg: Msg,
 
}
 

	
 
#[derive(Debug)]
 
struct MessengerState {
 
    poll: Poll,
 
    events: Events,
 
    delayed: Vec<ReceivedMsg>,
 
    undelayed: Vec<ReceivedMsg>,
 
    polled_undrained: IndexSet<Key>,
 
}
 
#[derive(Debug)]
 
struct ChannelIdStream {
 
    controller_id: ControllerId,
 
    next_channel_index: ChannelIndex,
 
}
 

	
 
#[derive(Debug)]
 
struct Controller {
 
    protocol_description: Arc<ProtocolD>,
 
    inner: ControllerInner,
 
    ephemeral: ControllerEphemeral,
 
}
 
#[derive(Debug)]
 
struct ControllerInner {
 
    round_index: usize,
 
    channel_id_stream: ChannelIdStream,
 
    endpoint_exts: Arena<EndpointExt>,
 
    messenger_state: MessengerState,
 
    mono_n: Option<MonoN>,
 
    mono_ps: Vec<MonoP>,
 
    family: ControllerFamily,
 
    logger: String,
 
}
 

	
 
/// This structure has its state entirely reset between synchronous rounds
 
#[derive(Debug, Default)]
 
struct ControllerEphemeral {
 
    solution_storage: SolutionStorage,
 
    poly_n: Option<PolyN>,
 
    poly_ps: Vec<PolyP>,
 
    ekey_to_holder: HashMap<Key, PolyId>,
 
}
 

	
 
#[derive(Debug)]
 
struct ControllerFamily {
 
    parent_ekey: Option<Key>,
 
    children_ekeys: Vec<Key>,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum SyncRunResult {
 
    BlockingForRecv,
 
    AllBranchesComplete,
 
    NoBranches,
 
}
 

	
 
// Used to identify poly actors
 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
 
enum PolyId {
 
    N,
 
    P { index: usize },
 
}
 

	
 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
 
pub(crate) enum SubtreeId {
 
    PolyN,
 
    PolyP { index: usize },
 
    ChildController { ekey: Key },
 
}
 

	
 
pub(crate) struct MonoPContext<'a> {
 
    inner: &'a mut ControllerInner,
 
    ekeys: &'a mut HashSet<Key>,
 
}
 
pub(crate) struct PolyPContext<'a> {
 
    my_subtree_id: SubtreeId,
 
    inner: &'a mut ControllerInner,
 
    solution_storage: &'a mut SolutionStorage,
 
}
 
impl PolyPContext<'_> {
 
    #[inline(always)]
 
    fn reborrow<'a>(&'a mut self) -> PolyPContext<'a> {
 
        let Self { solution_storage, my_subtree_id, inner } = self;
 
        PolyPContext { solution_storage, my_subtree_id: *my_subtree_id, inner }
 
    }
 
}
 
struct BranchPContext<'m, 'r> {
 
    m_ctx: PolyPContext<'m>,
 
    ekeys: &'r HashSet<Key>,
 
    predicate: &'r Predicate,
 
    inbox: &'r HashMap<Key, Payload>,
 
}
 

	
 
#[derive(Default)]
 
pub(crate) struct SolutionStorage {
 
    old_local: HashSet<Predicate>,
 
    new_local: HashSet<Predicate>,
 
    // this pair of fields acts as a SubtreeId -> HashSet<Predicate> map, which is friendlier to iteration
 
    subtree_solutions: Vec<HashSet<Predicate>>,
 
    subtree_id_to_index: HashMap<SubtreeId, usize>,
 
}
 

	
 
trait Messengerlike {
 
    fn get_state_mut(&mut self) -> &mut MessengerState;
 
    fn get_endpoint_mut(&mut self, eekey: Key) -> &mut Endpoint;
 

	
 
    fn delay(&mut self, received: ReceivedMsg) {
 
        self.get_state_mut().delayed.push(received);
 
    }
 
    fn undelay_all(&mut self) {
 
        let MessengerState { delayed, undelayed, .. } = self.get_state_mut();
 
        undelayed.extend(delayed.drain(..))
 
    }
 

	
 
    fn send(&mut self, to: Key, msg: Msg) -> Result<(), EndpointErr> {
 
        self.get_endpoint_mut(to).send(msg)
 
    }
 

	
 
    // attempt to receive a message from one of the endpoints before the deadline
 
    fn recv(&mut self, deadline: Instant) -> Result<Option<ReceivedMsg>, MessengerRecvErr> {