Changeset - 6471206c5c59
Christopher Esterhuyse <christopheresterhuyse@gmail.com> - 2020-02-19 16:26:12
more
7 files changed with 240 insertions and 69 deletions:
src/runtime/experimental/api.rs
new file 100644
 
use crate::common::*;
use crate::runtime::endpoint::Endpoint;
use crate::runtime::endpoint::EndpointExt;
use crate::runtime::endpoint::EndpointInfo;

use std::net::SocketAddr;
use std::sync::Arc;

pub enum Coupling {
    Active,
    Passive,
}
pub struct Binding {
    pub coupling: Coupling,
    pub polarity: Polarity,
    pub addr: SocketAddr,
}
impl From<(Coupling, Polarity, SocketAddr)> for Binding {
    fn from((coupling, polarity, addr): (Coupling, Polarity, SocketAddr)) -> Self {
        Self { coupling, polarity, addr }
    }
}
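
// Sketch (illustrative only): the tuple From impl above permits terse bind
// sites. `Putter` is the Polarity variant already used unqualified below.
#[test]
fn binding_from_tuple() {
    let addr: SocketAddr = "127.0.0.1:9000".parse().unwrap();
    let b = Binding::from((Coupling::Active, Putter, addr));
    assert_eq!(b.addr, addr);
}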
 

	
 
pub struct MsgBuffer<'a> {
    slice: &'a mut [u8],
    len: usize,
}
impl MsgBuffer<'_> {
    pub fn clear(&mut self) {
        self.len = 0;
    }
    pub fn write_msg(&mut self, r: &[u8]) -> std::io::Result<()> {
        use std::io::Write;
        // write through a fresh reborrow: Write for &mut [u8] advances the
        // slice it is called on, and self.slice must keep its full extent.
        (&mut self.slice[..]).write_all(r)?;
        self.len = r.len();
        Ok(())
    }
    pub fn read_msg(&self) -> &[u8] {
        &self.slice[0..self.len]
    }
}
impl<'a> From<&'a mut [u8]> for MsgBuffer<'a> {
    fn from(slice: &'a mut [u8]) -> Self {
        Self { slice, len: 0 }
    }
}
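
// Sketch (illustrative only): MsgBuffer borrows a caller-owned slice, so a
// write/read round trip performs no allocation.
#[test]
fn msg_buffer_roundtrip() {
    let mut backing = [0u8; 16];
    let mut buf = MsgBuffer::from(&mut backing[..]);
    buf.write_msg(b"ping").unwrap();
    assert_eq!(buf.read_msg(), b"ping");
    buf.clear();
    assert_eq!(buf.read_msg(), b"");
}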
 

	
 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct Port(pub u32);
pub struct PortOp<'a> {
    pub port: Port,
    pub msg: Option<&'a [u8]>,
}

#[derive(Default)]
struct ChannelIndexStream {
    next: u32,
}
impl ChannelIndexStream {
    fn next(&mut self) -> u32 {
        self.next += 1;
        self.next - 1
    }
}
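
// Sketch (illustrative only): the index stream hands out 0, 1, 2, ... in order.
#[test]
fn channel_index_stream_counts_up() {
    let mut s = ChannelIndexStream::default();
    assert_eq!([s.next(), s.next(), s.next()], [0, 1, 2]);
}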
 

	
 
enum Connector {
    Connecting(Connecting),
    Connected(Connected),
}

#[derive(Default)]
pub struct Connecting {
    bindings: Vec<Binding>, // invariant: no more than std::u32::MAX entries
}
impl Connecting {
    pub fn bind(&mut self, binding: Binding) -> Port {
        self.bindings.push(binding);
        // preserve invariant
        let pid: u32 = (self.bindings.len() - 1).try_into().expect("Port ID overflow!");
        Port(pid)
    }
    pub fn connect(&mut self, timeout: Option<Duration>) -> Result<Connected, ()> {
        let controller_id = 42;
        let channel_index_stream = ChannelIndexStream::default();
        // drain self if successful
        todo!()
    }
}
 
pub struct Protocol;
impl Protocol {
    pub fn parse(_pdl_text: &[u8]) -> Result<Self, ()> {
        todo!()
    }
}
struct ComponentExt {
    protocol: Arc<Protocol>,
    ports: HashSet<Port>,
    name: Vec<u8>,
}
pub struct Connected {
    native_ports: HashSet<Port>,
    controller_id: ControllerId,
    channel_index_stream: ChannelIndexStream,
    endpoint_exts: Vec<EndpointExt>, // invariant: no more than std::u32::MAX entries
    components: Vec<ComponentExt>,
}
 
impl Connected {
    pub fn new_channel(&mut self) -> [Port; 2] {
        assert!(self.endpoint_exts.len() <= std::u32::MAX as usize - 2);
        // the two new endpoints are pushed at the current length and length + 1
        let next_index = self.endpoint_exts.len() as u32;
        let ports = [Port(next_index), Port(next_index + 1)];
        let channel_id = ChannelId {
            controller_id: self.controller_id,
            channel_index: self.channel_index_stream.next(),
        };
        let [e0, e1] = Endpoint::new_memory_pair();
        self.endpoint_exts.push(EndpointExt {
            info: EndpointInfo { channel_id, polarity: Putter },
            endpoint: e0,
        });
        self.endpoint_exts.push(EndpointExt {
            info: EndpointInfo { channel_id, polarity: Getter },
            endpoint: e1,
        });
        ports
    }
    pub fn new_component(
        &mut self,
        protocol: &Arc<Protocol>,
        name: Vec<u8>,
        moved_ports: &[Port],
    ) -> Result<(), ()> {
        let moved_ports = moved_ports.iter().copied().collect();
        if !self.native_ports.is_superset(&moved_ports) {
            return Err(());
        }
        self.native_ports.retain(|e| !moved_ports.contains(e));
        self.components.push(ComponentExt { ports: moved_ports, protocol: protocol.clone(), name });
        // TODO add a singleton machine
        Ok(())
    }
    pub fn sync_set(&mut self, _ops: &mut [PortOp]) {
        todo!()
    }
    pub fn sync_subsets(
        &mut self,
        _ops: &mut [PortOp],
        bit_subsets: &[&[usize]],
    ) -> Result<usize, ()> {
        for &bit_subset in bit_subsets {
            use super::bits::BitChunkIter;
            BitChunkIter::new(bit_subset.iter().copied());
        }
        todo!()
    }
}
 

	
 
#[test]
fn test() {
    let mut c = Connecting::default();
    let p0 = c.bind(Binding {
        coupling: Coupling::Active,
        polarity: Putter,
        addr: "127.0.0.1:8000".parse().unwrap(),
    });
    let p1 = c.bind(Binding {
        coupling: Coupling::Passive,
        polarity: Putter,
        addr: "127.0.0.1:8001".parse().unwrap(),
    });

    let proto_0 = Arc::new(Protocol::parse(b"").unwrap());
    let mut c = c.connect(None).unwrap();
    let [p2, p3] = c.new_channel();
    c.new_component(&proto_0, b"sync".to_vec(), &[p0, p2]).unwrap();
    let mut ops = [
        //
        PortOp { port: p1, msg: Some(b"hi!") },
        PortOp { port: p1, msg: Some(b"ahoy!") },
        PortOp { port: p1, msg: Some(b"hello!") },
    ];
    c.sync_subsets(&mut ops, &[&[0b001], &[0b010], &[0b100]]).unwrap();
}
src/runtime/experimental/bits.rs
file renamed from src/runtime/bits.rs to src/runtime/experimental/bits.rs
 
use crate::common::*;

const fn usize_bytes() -> usize {
    std::mem::size_of::<usize>()
}
const fn usize_bits() -> usize {
    usize_bytes() * 8
}

/// Given an iterator over usize chunks, iterates over the indices (each represented as a u32) for which the bit is SET,
/// treating the bits of the chunks as one contiguous bit array.
/// e.g. on a 64-bit target, input [0b111000, 0b11] gives output [3, 4, 5, 64, 65].
/// observe that the bits per chunk are ordered from least to most significant bits, yielding smaller to larger indices.
/// assumes chunk_iter will yield no more than std::u32::MAX / usize_bits() chunks
pub(crate) struct BitChunkIter<I: Iterator<Item = usize>> {
    chunk_iter: I,
    next_bit_index: u32,
    cached: usize,
}
 

	
 
impl<I: Iterator<Item = usize>> BitChunkIter<I> {
    pub fn new(chunk_iter: I) -> Self {
        // first chunk is always a dummy zero, as if chunk_iter yielded Some(0).
        // Consequences:
        // 1. our next_bit_index is always off by usize_bits() (we correct for it in Self::next) (no additional overhead)
        // 2. we cache usize and not Option<usize>, because chunk_iter.next() is only called in Self::next.
        Self { chunk_iter, next_bit_index: 0, cached: 0 }
    }
}
 
impl<I: Iterator<Item = usize>> Iterator for BitChunkIter<I> {
    type Item = u32;
    fn next(&mut self) -> Option<Self::Item> {
        let mut chunk = self.cached;

        // loop until either:
        // 1. there are no more Items to return, or
        // 2. chunk encodes 1+ Items, one of which we will return.
        while chunk == 0 {
            // chunk has no bits set! get the next one...
            chunk = self.chunk_iter.next()?;

            // ... and jump self.next_bit_index to the next multiple of usize_bits().
            self.next_bit_index =
                (self.next_bit_index + usize_bits() as u32) & !(usize_bits() as u32 - 1);
        }
        // there exists 1+ set bits in chunk
        // assert(chunk > 0);

        // Until the least significant bit of chunk is 1:
        // 1. shift chunk to the right,
        // 2. and increment self.next_bit_index accordingly
        // effectively performs a little binary search, shifting 32, then 16, ...
        // TODO perhaps there is a more efficient SIMD op for this?
        const N_INIT: u32 = usize_bits() as u32 / 2;
        let mut n = N_INIT;
        while n >= 1 {
            // n is [32,16,8,4,2,1] on a 64-bit machine
            // this loop is unrolled with release optimizations
            let n_least_significant_mask = (1 << n) - 1;
            if chunk & n_least_significant_mask == 0 {
                // no 1 set within 0..n least significant bits.
                self.next_bit_index += n;
                chunk >>= n;
            }
            n /= 2;
        }
        // least significant bit of chunk is 1. Item to return is known.
        // assert(chunk & 1 == 1)

        // prepare our state for the next time Self::next is called.
        // Overwrite self.cached such that its shifted state is retained,
        // and jump over the bit whose index we are about to return.
        self.next_bit_index += 1;
        self.cached = chunk >> 1;

        // returned index is usize_bits() smaller than self.next_bit_index because we use an
        // off-by-usize_bits() encoding to avoid having to cache an Option<usize>.
        Some(self.next_bit_index - 1 - usize_bits() as u32)
    }
}
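
// Sketch (illustrative only): checks the doc-comment example above on a
// 64-bit target, where the second chunk's bits begin at index 64.
#[test]
fn bit_chunk_iter_example() {
    let chunks = [0b111000usize, 0b11];
    let indices: Vec<u32> = BitChunkIter::new(chunks.iter().copied()).collect();
    assert_eq!(indices, [3, 4, 5, 64, 65]);
}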
 

	
 
/*  --properties-->
     ___ ___ ___ ___
    |___|___|___|___|
  | |___|___|___|___|
  | |___|___|___|___|
  | |___|___|___|___|
  |
  V
 entity chunks (groups of size usize_bits())
*/
 

	
 
// TODO newtypes Entity and Property

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct Pair {
    entity: u32,
    property: u32,
}
impl From<[u32; 2]> for Pair {
    fn from([entity, property]: [u32; 2]) -> Self {
        Pair { entity, property }
    }
}
struct BitMatrix {
    bounds: Pair,
    buffer: *mut usize,
}
 
impl Drop for BitMatrix {
    fn drop(&mut self) {
        let total_chunks = Self::row_chunks(self.bounds.property as usize)
            * Self::column_chunks(self.bounds.entity as usize);
        let layout = Self::layout_for(total_chunks);
        unsafe {
            // SAFETY: buffer was allocated in Self::new with this same layout
            std::alloc::dealloc(self.buffer as *mut u8, layout);
        }
    }
}
 
impl Debug for BitMatrix {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let row_chunks = Self::row_chunks(self.bounds.property as usize);
        let column_chunks = Self::column_chunks(self.bounds.entity as usize);
        for property in 0..row_chunks {
            for entity_chunk in 0..column_chunks {
                write!(f, "|")?;
                let mut chunk = unsafe { *self.buffer.add(row_chunks * entity_chunk + property) };
                let end = if entity_chunk + 1 == column_chunks {
                    self.bounds.entity % usize_bits() as u32
                } else {
                    usize_bits() as u32
                };
                for _ in 0..end {
                    let c = match chunk & 1 {
                        0 => '0',
                        _ => '1',
                    };
                    write!(f, "{}", c)?;
                    chunk >>= 1;
                }
            }
            write!(f, "|\n")?;
        }
        Ok(())
    }
}
 
impl BitMatrix {
    #[inline]
    const fn chunk_len_ceil(value: usize) -> usize {
        (value + usize_bits() - 1) & !(usize_bits() - 1)
    }
    #[inline]
    const fn row_of(entity: usize) -> usize {
        entity / usize_bits()
    }
    #[inline]
    const fn row_chunks(property_bound: usize) -> usize {
        property_bound
    }
    #[inline]
    const fn column_chunks(entity_bound: usize) -> usize {
        Self::chunk_len_ceil(entity_bound) / usize_bits()
    }
    #[inline]
    fn offsets_unchecked(&self, at: Pair) -> [usize; 2] {
        let o_in = at.entity as usize % usize_bits();
        let row = Self::row_of(at.entity as usize);
        let row_chunks = self.bounds.property as usize;
        let o_of = row * row_chunks + at.property as usize;
        [o_of, o_in]
    }
    // returns a chunk which has bits 000...000111...111
    // for the last JAGGED chunk given the column size.
    // if the last chunk is not jagged (when entity_bound % usize_bits() == 0)
    // None is returned,
    // otherwise Some(x) is returned such that x & chunk would mask out
    // the bits NOT in 0..entity_bound
    fn last_row_chunk_mask(entity_bound: u32) -> Option<usize> {
        let zero_prefix_len = entity_bound as usize % usize_bits();
        if zero_prefix_len == 0 {
            None
        } else {
            Some(!0 >> (usize_bits() - zero_prefix_len))
        }
    }
    fn assert_within_bounds(&self, at: Pair) {
        assert!(at.entity < self.bounds.entity);
        assert!(at.property < self.bounds.property);
    }
 

	
 
    fn layout_for(mut total_chunks: usize) -> std::alloc::Layout {
        unsafe {
            // this layout is ALWAYS valid:
            // 1. size is always nonzero
            // 2. size is always a multiple of usize_bytes() and usize_bytes()-aligned
            if total_chunks == 0 {
                total_chunks = 1;
            }
            std::alloc::Layout::from_size_align_unchecked(
                usize_bytes() * total_chunks,
                usize_bytes(),
            )
        }
    }
 
    /////////

    fn reshape(&mut self, bounds: Pair) {
        todo!()
    }

    fn new(bounds: Pair) -> Self {
        let total_chunks = Self::row_chunks(bounds.property as usize)
            * Self::column_chunks(bounds.entity as usize);
        let layout = Self::layout_for(total_chunks);
        let buffer;
        unsafe {
            buffer = std::alloc::alloc(layout) as *mut usize;
            buffer.write_bytes(0u8, total_chunks);
        };
        Self { buffer, bounds }
    }
    fn set(&mut self, at: Pair) {
        self.assert_within_bounds(at);
        let [o_of, o_in] = self.offsets_unchecked(at);
        unsafe { *self.buffer.add(o_of) |= 1 << o_in };
    }
 
    fn unset(&mut self, at: Pair) {
        self.assert_within_bounds(at);
        let [o_of, o_in] = self.offsets_unchecked(at);
        unsafe { *self.buffer.add(o_of) &= !(1 << o_in) };
    }
    fn test(&self, at: Pair) -> bool {
        self.assert_within_bounds(at);
        let [o_of, o_in] = self.offsets_unchecked(at);
        unsafe { *self.buffer.add(o_of) & 1 << o_in != 0 }
    }

    fn batch_mut<'a, 'b>(&mut self, mut chunk_mut_fn: impl FnMut(&'b mut [BitChunk])) {
        let row_chunks = Self::row_chunks(self.bounds.property as usize);
        let column_chunks = Self::column_chunks(self.bounds.entity as usize);
        let mut ptr = self.buffer;
        for _row in 0..column_chunks {
            let slice;
            unsafe {
                let slicey = std::slice::from_raw_parts_mut(ptr, row_chunks);
                slice = std::mem::transmute(slicey);
                ptr = ptr.add(row_chunks);
@@ -251,52 +258,54 @@ impl BitMatrix {
                    ptr = ptr.add(1);
                }
            }
        }
    }
 

	
 
    /// given:
    /// 1. a buffer to work with
    /// 2. a _fold function_ for combining the properties of a given entity
    ///    and returning a new derived property (working one entity-chunk at a time)
    fn iter_entities_where<'a, 'b>(
        &'a self,
        buf: &'b mut Vec<usize>,
        mut fold_fn: impl FnMut(&'b [BitChunk]) -> BitChunk,
    ) -> BitChunkIter<std::vec::Drain<'b, usize>> {
        let buf_start = buf.len();
        let row_chunks = Self::row_chunks(self.bounds.property as usize);
        let column_chunks = Self::column_chunks(self.bounds.entity as usize);
        let mut ptr = self.buffer;
        for _row in 0..column_chunks {
            let slice;
            unsafe {
                let slicey = std::slice::from_raw_parts(ptr, row_chunks);
                slice = std::mem::transmute(slicey);
                ptr = ptr.add(row_chunks);
            }
            let chunk = fold_fn(slice);
            buf.push(chunk.0);
        }
        if let Some(mask) = Self::last_row_chunk_mask(self.bounds.entity) {
            *buf.iter_mut().last().unwrap() &= mask;
        }
        BitChunkIter::new(buf.drain(buf_start..))
    }
}
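
// Sketch (illustrative only, 64-bit target assumed): the jagged mask keeps
// only the bits below the entity bound in the final chunk of a column.
#[test]
fn jagged_mask_example() {
    assert_eq!(BitMatrix::last_row_chunk_mask(3), Some(0b111));
    assert_eq!(BitMatrix::last_row_chunk_mask(64), None);
}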
 

	
 
use derive_more::*;
#[derive(
    Debug, Copy, Clone, BitAnd, Not, BitOr, BitXor, BitAndAssign, BitOrAssign, BitXorAssign,
)]
#[repr(transparent)]
pub struct BitChunk(usize);
impl BitChunk {
    const fn bits() -> usize {
        Self::bytes() * 8
    }
    const fn bytes() -> usize {
        std::mem::size_of::<Self>()
    }
    const fn any(self) -> bool {
        self.0 != FALSE.0
    }
    const fn all(self) -> bool {
        self.0 == TRUE.0
 
@@ -322,34 +331,12 @@ fn matrix_test() {
 
    }
    m.unset([62, 0].into());
    println!("{:?}", &m);

    m.batch_mut(move |p| p[1] = p[0] ^ TRUE);
    println!("{:?}", &m);

    let mut buf = vec![];
    for index in m.iter_entities_where(&mut buf, move |p| p[1]) {
        println!("index {}", index);
    }
}
 

	
 
/*
TODO
1. BitChunk newtype in matrix and bit iterator
2. make BitChunk a wrapper around usize, using mem::size_of for the shifting

    #[inline(always)]
    fn skip_n_zeroes(chunk: &mut usize, n: usize) {
        if *chunk & ((1 << n) - 1) == 0 {
            *chunk >>= n;
        }
    }
    let mut n = std::mem::size_of::<usize>() * 8 / 2;
    while n > 1 {
        if x & ((1 << n) - 1) == 0 {
            x >>= n;
        }
        n /= 2;
    }
*/
src/runtime/experimental/ecs.rs
file renamed from src/runtime/ecs.rs to src/runtime/experimental/ecs.rs
src/runtime/experimental/mod.rs
new file 100644
mod api;
mod bits;
src/runtime/experimental/predicate.rs
file renamed from src/runtime/predicate.rs to src/runtime/experimental/predicate.rs
src/runtime/mod.rs
#[cfg(feature = "ffi")]
pub mod ffi;

mod actors;
pub(crate) mod communication;
pub(crate) mod connector;
pub(crate) mod endpoint;
pub mod errors;
mod experimental;
mod serde;
pub(crate) mod setup;

pub(crate) type ProtocolD = crate::protocol::ProtocolDescriptionImpl;
pub(crate) type ProtocolS = crate::protocol::ComponentStateImpl;

use crate::common::*;
use actors::*;
use endpoint::*;
use errors::*;

#[derive(Debug, PartialEq)]
src/test/connector.rs
extern crate test_generator;

use super::*;

use crate::common::*;
use crate::runtime::{errors::*, PortBinding::*};

static PDL: &[u8] = b"
primitive forward_once(in i, out o) {
    synchronous() put(o, get(i));
}
primitive blocked(in i, out o) {
    while(true) synchronous {}
}
primitive forward(in i, out o) {
    while(true) synchronous {
        put(o, get(i));
    }
}
primitive sync(in i, out o) {
    while(true) synchronous {
        if (fires(i)) put(o, get(i));
    }