Changeset c1b2442f23b2: Remove references to old runtime and stale code
Author: MH <contact@maxhenger.nl>, 2021-12-03 15:33:18
Review status: not reviewed
89 files changed, with 1487 insertions and 5130 deletions
0 comments (0 inline, 0 general)
Cargo.toml
[package]
 
name = "reowolf_rs"
 
version = "1.1.0"
 
version = "1.2.0"
 
authors = [
 
	"Max Henger <henger@cwi.nl>",
 
	"Christopher Esterhuyse <esterhuy@cwi.nl>",
 
	"Hans-Dieter Hiep <hdh@cwi.nl>"
 
]
 
edition = "2018"
 

	
 
[dependencies]
 
# convenience macros
 
maplit = "1.0.2"
 
derive_more = "0.99.2"
 

	
 
# runtime
 
bincode = "1.3.1"
 
serde = { version = "1.0.114", features = ["derive"] }
 
getrandom = "0.1.14" # tiny crate. used to guess controller-id
 

	
 
# network
 
mio = { version = "0.7.0", package = "mio", features = ["udp", "tcp", "os-poll"] }
 
socket2 = { version = "0.3.12", optional = true }
 

	
 
# protocol
 
backtrace = "0.3"
 
lazy_static = "1.4.0"
 

	
 
# ffi
 

	
 
# socket ffi
 
libc = { version = "^0.2", optional = true }
 
os_socketaddr = { version = "0.1.0", optional = true }
 

	
 
[dev-dependencies]
 
# test-generator = "0.3.0"
 
crossbeam-utils = "0.7.2"
 
lazy_static = "1.4.0"
 

	
 
[lib]
 
crate-type = [
 
	"rlib", # for use as a Rust dependency. 
 
	"cdylib" # for FFI use, typically C.
 
]
 

	
 
[features]
 
default = ["ffi"]
 
ffi = [] # see src/ffi/mod.rs
 
ffi_pseudo_socket_api = ["ffi", "libc", "os_socketaddr"] # see src/ffi/pseudo_socket_api.rs.
 
endpoint_logging = [] # see src/macros.rs
 
session_optimization = [] # see src/runtime/setup.rs
 
no_logging = [] # see src/macros.rs
 
	"rlib", # for use as a Rust dependency.
 
]
 
\ No newline at end of file
Deleted files:
examples/README.md
examples/bench_01/main.c
examples/bench_02/main.c
examples/bench_03/main.c
examples/bench_04/main.c
examples/bench_05/main.c
examples/bench_06/main.c
examples/bench_07/main.c
examples/bench_08/main.c
examples/bench_09/main.c
examples/bench_10/main.c
examples/bench_11/main.c
examples/bench_12/main.c
examples/bench_13/main.c
examples/bench_14/amy.c
examples/bench_14/bob.c
examples/bench_15/main.c
examples/bench_16/main.c
examples/bench_17/main.c
examples/bench_18/main.c
examples/bench_19/main.c
examples/bench_20/main.c
examples/bench_21/main.c
examples/bench_22/main.c
examples/bench_23/main.c
examples/bench_24/main.c
examples/bench_25/main.c
examples/bench_26/main.c
examples/bench_27/main.c
examples/bench_28/main.c
examples/bench_29/main.c
examples/bench_30/main.c
examples/cpy_dll.sh
examples/cpy_so.sh
examples/eg_protocols.pdl
examples/incr_1/amy.c
examples/incr_2/amy.c
examples/incr_3/amy.c
examples/incr_4/amy.c
examples/incr_5/amy.c
examples/incr_6/amy.c
examples/incr_7/amy.c
examples/incr_8/amy.c
examples/incr_9/amy.c
examples/interop_1_socket/main.c
examples/interop_2_pseudo_socket/main.c
examples/interop_3_connector/main.c
examples/make.py
examples/pres_1/amy.c
examples/pres_1/bob.c
examples/pres_2/bob.c
examples/pres_3/amy.c
examples/pres_3/bob.c
examples/pres_4/bob.c
examples/pres_5/amy.c
examples/pres_5/bob.c
examples/utility.c
src/collections/string_pool.rs
use std::ptr::null_mut;
 
use std::hash::{Hash, Hasher};
 
use std::marker::PhantomData;
 
use std::fmt::{Debug, Display, Result as FmtResult};
 
use crate::common::Formatter;
 
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
 

	
 
const SLAB_SIZE: usize = u16::MAX as usize;
 

	
 
#[derive(Clone)]
 
pub struct StringRef<'a> {
 
    data: *const u8,
 
    length: usize,
 
    _phantom: PhantomData<&'a [u8]>,
 
}
 

	
 
// As the StringRef is an immutable thing:
 
unsafe impl Sync for StringRef<'_> {}
 
unsafe impl Send for StringRef<'_> {}
 

	
 
impl<'a> StringRef<'a> {
 
    /// `new` constructs a new StringRef whose data is not owned by the
 
    /// `StringPool`, hence cannot have a `'static` lifetime.
 
    pub(crate) fn new(data: &'a [u8]) -> StringRef<'a> {
 
        // This is an internal (compiler) function: so debug_assert that the
 
        // string is valid ascii. Most commonly the input will come from the
 
        // code's source file, which is checked for ASCII-ness anyway.
 
        debug_assert!(data.is_ascii());
 
        let length = data.len();
 
        let data = data.as_ptr();
 
        StringRef{ data, length, _phantom: PhantomData }
 
    }
 

	
 
    pub fn as_str(&self) -> &'a str {
 
        unsafe {
 
            let slice = std::slice::from_raw_parts::<'a, u8>(self.data, self.length);
 
            std::str::from_utf8_unchecked(slice)
 
        }
 
    }
 

	
 
    pub fn as_bytes(&self) -> &'a [u8] {
 
        unsafe {
 
            std::slice::from_raw_parts::<'a, u8>(self.data, self.length)
 
        }
 
    }
 
}
 

	
 
impl<'a> Debug for StringRef<'a> {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        f.write_str("StringRef{ value: ")?;
 
        f.write_str(self.as_str())?;
 
        f.write_str(" }")
 
    }
 
}
 

	
 
impl<'a> Display for StringRef<'a> {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        f.write_str(self.as_str())
 
    }
 
}
 

	
 
impl PartialEq for StringRef<'_> {
 
    fn eq(&self, other: &StringRef) -> bool {
 
        self.as_str() == other.as_str()
 
    }
 
}
 

	
 
impl Eq for StringRef<'_> {}
 

	
 
impl Hash for StringRef<'_> {
 
    fn hash<H: Hasher>(&self, state: &mut H) {
 
        state.write(self.as_bytes());
 
    }
 
}
 

	
 
struct StringPoolSlab {
 
    prev: *mut StringPoolSlab,
 
    data: Vec<u8>,
 
    remaining: usize,
 
}
 

	
 
impl StringPoolSlab {
 
    fn new(prev: *mut StringPoolSlab) -> Self {
 
        Self{ prev, data: Vec::with_capacity(SLAB_SIZE), remaining: SLAB_SIZE }
 
    }
 
}
 

	
 
/// StringPool is an ever-growing pool of strings. Strings have a maximum size
 
/// equal to the slab size. The slabs are essentially a linked list to maintain
 
/// pointer-stability of the strings themselves.
 
/// All `StringRef` instances are invalidated when the string pool is dropped.
 
pub(crate) struct StringPool {
 
    last: *mut StringPoolSlab,
 
}
 

	
 
impl StringPool {
 
    pub(crate) fn new() -> Self {
 
        // To have some stability we just turn a box into a raw ptr.
 
        let initial_slab = Box::new(StringPoolSlab::new(null_mut()));
 
        let initial_slab = Box::into_raw(initial_slab);
 
        StringPool{
 
            last: initial_slab,
 
        }
 
    }
 

	
 
    /// Interns a string to the `StringPool`, returning a reference to it. The
 
    /// pointer owned by `StringRef` is `'static` as the `StringPool` doesn't
 
    /// reallocate/deallocate until dropped (which only happens at the end of
 
    /// the program).
 
    pub(crate) fn intern(&mut self, data: &[u8]) -> StringRef<'static> {
 
        let data_len = data.len();
 
        assert!(data_len <= SLAB_SIZE, "string is too large for slab"); // if you hit this, create logic for large-string allocations
 
        debug_assert!(std::str::from_utf8(data).is_ok(), "string to intern is not valid UTF-8 encoded");
 
        
 
        let mut last = unsafe{&mut *self.last};
 
        if data.len() > last.remaining {
 
            // Doesn't fit: allocate new slab
 
            self.alloc_new_slab();
 
            last = unsafe{&mut *self.last};
 
        }
 

	
 
        // Must fit now, compute hash and put in buffer
 
        debug_assert!(data_len <= last.remaining);
 
        let range_start = last.data.len();
 
        last.data.extend_from_slice(data);
 
        last.remaining -= data_len;
 
        debug_assert_eq!(range_start + data_len, last.data.len());
 

	
 
        unsafe {
 
            let start = last.data.as_ptr().offset(range_start as isize);
 
            StringRef{ data: start, length: data_len, _phantom: PhantomData }
 
        }
 
    }
 

	
 
    fn alloc_new_slab(&mut self) {
 
        let new_slab = Box::new(StringPoolSlab::new(self.last));
 
        let new_slab = Box::into_raw(new_slab);
 
        self.last = new_slab;
 
    }
 
}
 

	
 
impl Drop for StringPool {
 
    fn drop(&mut self) {
 
        let mut new_slab = self.last;
 
        while !new_slab.is_null() {
 
            let cur_slab = new_slab;
 
            unsafe {
 
                new_slab = (*cur_slab).prev;
 
                Box::from_raw(cur_slab); // consume and deallocate
 
            }
 
        }
 
    }
 
}
 

	
 
// String pool cannot be cloned, and the created `StringRef` instances remain
 
// allocated until the end of the program, so it is always safe to send. It is
 
// also sync in the sense that it becomes an immutable thing after compilation,
 
// but let's not derive that, in case we ever become a multithreaded compiler in
 
// the future.
 
unsafe impl Send for StringPool {}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 

	
 
    #[test]
 
    fn test_string_just_fits() {
 
        let large = "0".repeat(SLAB_SIZE);
 
        let mut pool = StringPool::new();
 
        let interned = pool.intern(large.as_bytes());
 
        assert_eq!(interned.as_str(), large);
 
    }
 

	
 
    #[test]
 
    #[should_panic]
 
    fn test_string_too_large() {
 
        let large = "0".repeat(SLAB_SIZE + 1);
 
        let mut pool = StringPool::new();
 
        let _interned = pool.intern(large.as_bytes());
 
    }
 

	
 
    #[test]
 
    fn test_lots_of_small_allocations() {
 
        const NUM_PER_SLAB: usize = 32;
 
        const NUM_SLABS: usize = 4;
 

	
 
        let to_intern = "0".repeat(SLAB_SIZE / NUM_PER_SLAB);
 
        let mut pool = StringPool::new();
 

	
 
        let mut last_slab = pool.last;
 
        let mut all_refs = Vec::new();
 

	
 
        // Fill up first slab
 
        for _alloc_idx in 0..NUM_PER_SLAB {
 
            let interned = pool.intern(to_intern.as_bytes());
 
            all_refs.push(interned);
 
            assert!(std::ptr::eq(last_slab, pool.last));
 
        }
 

	
 
        for _slab_idx in 0..NUM_SLABS-1 {
 
            for alloc_idx in 0..NUM_PER_SLAB {
 
                let interned = pool.intern(to_intern.as_bytes());
 
                all_refs.push(interned);
 

	
 
                if alloc_idx == 0 {
 
                    // First allocation produces a new slab
 
                    assert!(!std::ptr::eq(last_slab, pool.last));
 
                    last_slab = pool.last;
 
                } else {
 
                    assert!(std::ptr::eq(last_slab, pool.last));
 
                }
 
            }
 
        }
 

	
 
        // All strings are still correct
 
        for string_ref in all_refs {
 
            assert_eq!(string_ref.as_str(), to_intern);
 
        }
 
    }
 
}
 
\ No newline at end of file
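The doc comments above promise two things worth spelling out: interned strings compare and hash by content (via the `PartialEq`/`Hash` impls), while the slab linked list keeps every returned pointer stable. A minimal within-crate usage sketch follows; it is illustrative only, not part of this changeset, and it assumes the module path `crate::collections::string_pool` matches the file layout:

    use crate::collections::string_pool::StringPool;
    use std::collections::HashSet;

    fn intern_example() {
        let mut pool = StringPool::new();
        // Interning the same bytes twice yields two distinct allocations...
        let a = pool.intern(b"connector");
        let b = pool.intern(b"connector");
        assert!(!std::ptr::eq(a.as_bytes().as_ptr(), b.as_bytes().as_ptr()));
        // ...but the refs are equal by content, so a set deduplicates them.
        let mut names = HashSet::new();
        names.insert(a);
        assert!(!names.insert(b));
    }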
src/ffi/mod.rs
deleted file
src/ffi/pseudo_socket_api.rs
deleted file
src/lib.rs
#[macro_use]
 
mod macros;
 

	
 
mod common;
 
// mod common;
 
mod protocol;
 
mod runtime;
 
pub mod runtime2;
 
pub mod runtime;
 
mod collections;
 

	
 
pub use common::{ConnectorId, EndpointPolarity, Payload, Polarity, PortId};
 
pub use protocol::ProtocolDescription;
 
pub use runtime::{error, Connector, DummyLogger, FileLogger, VecLogger};
 

	
 
// TODO: Remove when not benchmarking
 
pub use protocol::input_source::InputSource;
 
pub use protocol::ast::Heap;
 

	
 
#[cfg(feature = "ffi")]
 
pub mod ffi;
 
pub use protocol::ProtocolDescription;
 
\ No newline at end of file
src/macros.rs
macro_rules! enabled_debug_print {
 
    (false, $name:literal, $format:literal) => {};
 
    (false, $name:literal, $format:literal, $($args:expr),*) => {};
 
    (true, $name:literal, $format:literal) => {
 
        println!("[{}] {}", $name, $format)
 
    };
 
    (true, $name:literal, $format:literal, $($args:expr),*) => {
 
        println!("[{}] {}", $name, format!($format, $($args),*))
 
    };
 
}
 

	
 
/*
 
Change the definition of these macros to control the logging level statically
 
*/
 

	
 
macro_rules! log {
 
    (@ENDPT, $logger:expr, $($arg:tt)*) => {{
 
        // if let Some(w) = $logger.line_writer() {
 
        //     let _ = writeln!(w, $($arg)*);
 
        // }
 
    }};
 
    ($logger:expr, $($arg:tt)*) => {{
 
        #[cfg(not(feature = "no_logging"))]
 
        if let Some(w) = $logger.line_writer() {
 
            let _ = writeln!(w, $($arg)*);
 
        }
 
    }};
 
}
 
}
 
\ No newline at end of file
src/protocol/arena.rs
use crate::common::*;
 
use std::fmt::{Debug, Formatter};
 

	
 
use core::hash::Hash;
 
use core::marker::PhantomData;
 

	
 
pub struct Id<T> {
 
    // Not really a signed index into the heap: the index is signed only so that
 
    // -1 can mark an ID that is not yet known. This is checked in debug mode.
 
    pub(crate) index: i32,
 
    _phantom: PhantomData<T>,
 
}
 

	
 
impl<T> Id<T> {
 
    #[inline] pub(crate) fn new_invalid() -> Self     { Self{ index: -1, _phantom: Default::default() } }
 
    #[inline] pub(crate) fn new(index: i32) -> Self   { Self{ index, _phantom: Default::default() } }
 
    #[inline] pub(crate) fn is_invalid(&self) -> bool { self.index < 0 }
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) struct Arena<T> {
 
    store: Vec<T>,
 
}
 
//////////////////////////////////
 

	
 
impl<T> Debug for Id<T> {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        f.debug_struct("Id").field("index", &self.index).finish()
 
    }
 
}
 
impl<T> Clone for Id<T> {
 
    fn clone(&self) -> Self {
 
        *self
 
    }
 
}
 
impl<T> Copy for Id<T> {}
 
impl<T> PartialEq for Id<T> {
 
    fn eq(&self, other: &Self) -> bool {
 
        self.index.eq(&other.index)
 
    }
 
}
 
impl<T> Eq for Id<T> {}
 
impl<T> Hash for Id<T> {
 
    fn hash<H: std::hash::Hasher>(&self, h: &mut H) {
 
        self.index.hash(h);
 
    }
 
}
 

	
 
impl<T> Arena<T> {
 
    pub fn new() -> Self {
 
        Self { store: vec![] }
 
    }
 

	
 
    pub fn alloc_with_id(&mut self, f: impl FnOnce(Id<T>) -> T) -> Id<T> {
 
        // Let's keep this a runtime assert.
 
        assert!(self.store.len() < i32::max_value() as usize, "Arena out of capacity");
 
        let id = Id::new(self.store.len() as i32);
 
        self.store.push(f(id));
 
        id
 
    }
 

	
 
    // Compiler-internal direct retrieval
 
    pub(crate) fn get_id(&self, idx: usize) -> Id<T> {
 
        debug_assert!(idx < self.store.len());
 
        return Id::new(idx as i32);
 
    }
 

	
 
    pub fn iter(&self) -> impl Iterator<Item = &T> {
 
        self.store.iter()
 
    }
 

	
 
    pub fn len(&self) -> usize {
 
        self.store.len()
 
    }
 
}
 
impl<T> core::ops::Index<Id<T>> for Arena<T> {
 
    type Output = T;
 
    fn index(&self, id: Id<T>) -> &Self::Output {
 
        debug_assert!(!id.is_invalid(), "attempted to index into Arena with an invalid id (index < 0)");
 
        self.store.index(id.index as usize)
 
    }
 
}
 
impl<T> core::ops::IndexMut<Id<T>> for Arena<T> {
 
    fn index_mut(&mut self, id: Id<T>) -> &mut Self::Output {
 
        debug_assert!(!id.is_invalid(), "attempted to index_mut into Arena with an invalid id (index < 0)");
 
        self.store.index_mut(id.index as usize)
 
    }
 
}
 
\ No newline at end of file
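The `Id`/`Arena` pair above is the compiler's index-based handle scheme: an `Id<T>` is just an `i32` index (with -1 marking "not yet known"), and `alloc_with_id` hands the closure the id the element is about to receive so the element can store it. A small within-crate sketch of that pattern, illustrative only and not part of this changeset:

    struct Node { this: Id<Node>, parent: Id<Node>, value: u32 }

    fn arena_example() {
        let mut arena: Arena<Node> = Arena::new();
        // The closure receives the id the node will get, so the node can store it.
        let root = arena.alloc_with_id(|id| Node { this: id, parent: Id::new_invalid(), value: 0 });
        let child = arena.alloc_with_id(|id| Node { this: id, parent: root, value: 1 });
        assert_eq!(arena[child].parent, root);    // index the arena by Id
        assert!(arena[root].parent.is_invalid()); // -1 marks "no id yet"
    }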
src/protocol/eval/mod.rs
/// eval
 
///
 
/// Evaluator of the generated AST. Note that we use some misappropriated terms
 
/// to describe where values live and what they do. This is a temporary
 
/// implementation of an evaluator until some kind of appropriate bytecode or
 
/// machine code is generated.
 
///
 
/// Code is always executed within a "frame". For Reowolf the first frame is
 
/// usually an executed component. All subsequent frames are function calls.
 
/// Simple values live on the "stack". Each variable/parameter has a place on
 
/// the stack where its values are stored. If the value is not a primitive, then
 
/// its value will be stored in the "heap". Expressions are treated differently
 
/// and use a separate "stack" for their evaluation.
 
///
 
/// Since this is a value-based language, most values are copied. One has to be
 
/// careful with values that reside in the "heap" and make sure that copies are
 
/// properly removed from the heap.
 
///
 
/// Just to reiterate: this is a temporary wasteful implementation. A proper
 
/// implementation would fully fill out the type table with alignment/size/
 
/// offset information and lay out bytecode.
 

	
 
pub(crate) mod value;
 
pub(crate) mod store;
 
pub(crate) mod executor;
 
pub(crate) mod error;
 

	
 
pub use error::EvalError;
 
pub use value::{Value, ValueGroup};
 
pub(crate) use store::{Store};
 
pub use value::{PortId, Value, ValueGroup};
 
pub use executor::{EvalContinuation, Prompt};
 

	
src/protocol/eval/value.rs
use std::collections::VecDeque;
 

	
 
use super::store::*;
 
use crate::PortId;
 
use crate::protocol::ast::{
 
    AssignmentOperator,
 
    BinaryOperator,
 
    UnaryOperator,
 
    ConcreteType,
 
    ConcreteTypePart,
 
};
 
use crate::protocol::parser::token_parsing::*;
 

	
 
pub type StackPos = u32;
 
pub type HeapPos = u32;
 

	
 
#[derive(Debug, Copy, Clone)]
 
pub enum ValueId {
 
    Stack(StackPos), // place on stack
 
    Heap(HeapPos, u32), // allocated region + values within that region
 
}
 

	
 
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 
pub struct PortId{
 
    pub(crate) id: u32
 
}
 

	
 
impl PortId {
 
    pub fn new(id: u32) -> Self {
 
        return Self{ id };
 
    }
 
}
 

	
 
/// Represents a value stored on the stack or on the heap. Some values contain
 
/// a `HeapPos`, implying that they're stored in the store's `Heap`. Clearing
 
/// a `Value` with a `HeapPos` from a stack must also clear the associated
 
/// region from the `Heap`.
 
#[derive(Debug, Clone)]
 
pub enum Value {
 
    // Special types, never encountered during evaluation if the compiler works correctly
 
    Unassigned,                 // Marker when variables are first declared, immediately followed by assignment
 
    PrevStackBoundary(isize),   // Marker for stack frame beginning, so we can pop stack values
 
    Ref(ValueId),               // Reference to a value, used by expressions producing references
 
    Binding(StackPos),          // Reference to a binding variable (reserved on the stack)
 
    // Builtin types
 
    Input(PortId),
 
    Output(PortId),
 
    Message(HeapPos),
 
    Null,
 
    Bool(bool),
 
    Char(char),
 
    String(HeapPos),
 
    UInt8(u8),
 
    UInt16(u16),
 
    UInt32(u32),
 
    UInt64(u64),
 
    SInt8(i8),
 
    SInt16(i16),
 
    SInt32(i32),
 
    SInt64(i64),
 
    Array(HeapPos),
 
    // Instances of user-defined types
 
    Enum(i64),
 
    Union(i64, HeapPos),
 
    Struct(HeapPos),
 
}
 

	
 
macro_rules! impl_union_unpack_as_value {
 
    ($func_name:ident, $variant_name:path, $return_type:ty) => {
 
        impl Value {
 
            pub(crate) fn $func_name(&self) -> $return_type {
 
                match self {
 
                    $variant_name(v) => *v,
 
                    _ => panic!(concat!("called ", stringify!($func_name()), " on {:?}"), self),
 
                }
 
            }
 
        }
 
    }
 
}
 

	
 
impl_union_unpack_as_value!(as_stack_boundary, Value::PrevStackBoundary, isize);
 
impl_union_unpack_as_value!(as_ref,     Value::Ref,     ValueId);
 
impl_union_unpack_as_value!(as_input,   Value::Input,   PortId);
 
impl_union_unpack_as_value!(as_output,  Value::Output,  PortId);
 
impl_union_unpack_as_value!(as_message, Value::Message, HeapPos);
 
impl_union_unpack_as_value!(as_bool,    Value::Bool,    bool);
 
impl_union_unpack_as_value!(as_char,    Value::Char,    char);
 
impl_union_unpack_as_value!(as_string,  Value::String,  HeapPos);
 
impl_union_unpack_as_value!(as_uint8,   Value::UInt8,   u8);
 
impl_union_unpack_as_value!(as_uint16,  Value::UInt16,  u16);
 
impl_union_unpack_as_value!(as_uint32,  Value::UInt32,  u32);
 
impl_union_unpack_as_value!(as_uint64,  Value::UInt64,  u64);
 
impl_union_unpack_as_value!(as_sint8,   Value::SInt8,   i8);
 
impl_union_unpack_as_value!(as_sint16,  Value::SInt16,  i16);
 
impl_union_unpack_as_value!(as_sint32,  Value::SInt32,  i32);
 
impl_union_unpack_as_value!(as_sint64,  Value::SInt64,  i64);
 
impl_union_unpack_as_value!(as_array,   Value::Array,   HeapPos);
 
impl_union_unpack_as_value!(as_enum,    Value::Enum,    i64);
 
impl_union_unpack_as_value!(as_struct,  Value::Struct,  HeapPos);
 

	
 
impl Value {
 
    pub(crate) fn as_union(&self) -> (i64, HeapPos) {
 
        match self {
 
            Value::Union(tag, v) => (*tag, *v),
 
            _ => panic!("called as_union on {:?}", self),
 
        }
 
    }
 

	
 
    pub(crate) fn is_integer(&self) -> bool {
 
        match self {
 
            Value::UInt8(_) | Value::UInt16(_) | Value::UInt32(_) | Value::UInt64(_) |
 
            Value::SInt8(_) | Value::SInt16(_) | Value::SInt32(_) | Value::SInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn is_unsigned_integer(&self) -> bool {
 
        match self {
 
            Value::UInt8(_) | Value::UInt16(_) | Value::UInt32(_) | Value::UInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn is_signed_integer(&self) -> bool {
 
        match self {
 
            Value::SInt8(_) | Value::SInt16(_) | Value::SInt32(_) | Value::SInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn as_unsigned_integer(&self) -> u64 {
 
        match self {
 
            Value::UInt8(v)  => *v as u64,
 
            Value::UInt16(v) => *v as u64,
 
            Value::UInt32(v) => *v as u64,
 
            Value::UInt64(v) => *v as u64,
 
            _ => unreachable!("called as_unsigned_integer on {:?}", self),
 
        }
 
    }
 

	
 
    pub(crate) fn as_signed_integer(&self) -> i64 {
 
        match self {
 
            Value::SInt8(v)  => *v as i64,
 
            Value::SInt16(v) => *v as i64,
 
            Value::SInt32(v) => *v as i64,
 
            Value::SInt64(v) => *v as i64,
 
            _ => unreachable!("called as_signed_integer on {:?}", self)
 
        }
 
    }
 

	
 
    /// Returns the heap position associated with the value. If the value
 
    /// doesn't store anything in the heap then we return `None`.
 
    pub(crate) fn get_heap_pos(&self) -> Option<HeapPos> {
 
        match self {
 
            Value::Message(v) => Some(*v),
 
            Value::String(v) => Some(*v),
 
            Value::Array(v) => Some(*v),
 
            Value::Union(_, v) => Some(*v),
 
            Value::Struct(v) => Some(*v),
 
            _ => None
 
        }
 
    }
 
}
 

	
 
/// When providing arguments to a new component, or when transferring values
 
/// from one component's store to a newly instantiated component, one has to
 
/// transfer stack and heap values. This `ValueGroup` represents such a
 
/// temporary group of values with potential heap allocations.
 
///
 
/// Constructing such a ValueGroup manually requires some extra care to make
 
/// sure all elements of `values` point to valid elements of `regions`.
 
///
 
/// Again: this is a temporary thing, hopefully removed once we move to a
 
/// bytecode interpreter.
 
#[derive(Clone, Debug)]
 
pub struct ValueGroup {
 
    pub(crate) values: Vec<Value>,
 
    pub(crate) regions: Vec<Vec<Value>>
 
}
 

	
 
impl ValueGroup {
 
    pub(crate) fn new_stack(values: Vec<Value>) -> Self {
 
        debug_assert!(values.iter().all(|v| v.get_heap_pos().is_none()));
 
        Self{
 
            values,
 
            regions: Vec::new(),
 
        }
 
    }
 
    pub(crate) fn from_store(store: &Store, values: &[Value]) -> Self {
 
        let mut group = ValueGroup{
 
            values: Vec::with_capacity(values.len()),
 
            regions: Vec::with_capacity(values.len()), // estimation
 
        };
 

	
 
        for value in values {
 
            let transferred = group.retrieve_value(value, store);
 
            group.values.push(transferred);
 
        }
 

	
 
        group
 
    }
 

	
 
    /// Transfers a provided value from a store into a local value with its
 
    /// heap allocations (if any) stored in the ValueGroup. Calling this
 
    /// function will not store the returned value in the `values` member.
 
    fn retrieve_value(&mut self, value: &Value, from_store: &Store) -> Value {
 
        let value = from_store.maybe_read_ref(value);
 
        if let Some(heap_pos) = value.get_heap_pos() {
 
            // Value points to a heap allocation, so transfer the heap values
 
            // internally.
 
            let from_region = &from_store.heap_regions[heap_pos as usize].values;
 
            let mut new_region = Vec::with_capacity(from_region.len());
 
            for value in from_region {
 
                let transferred = self.retrieve_value(value, from_store);
 
                new_region.push(transferred);
 
            }
 

	
 
            // Region is constructed, store internally and return the new value.
 
            let new_region_idx = self.regions.len() as HeapPos;
 
            self.regions.push(new_region);
 

	
 
            return match value {
 
                Value::Message(_)    => Value::Message(new_region_idx),
 
                Value::String(_)     => Value::String(new_region_idx),
 
                Value::Array(_)      => Value::Array(new_region_idx),
 
                Value::Union(tag, _) => Value::Union(*tag, new_region_idx),
 
                Value::Struct(_)     => Value::Struct(new_region_idx),
 
                _ => unreachable!(),
 
            };
 
        } else {
 
            return value.clone();
 
        }
 
    }
 

	
 
    /// Transfers the heap values and the stack values into the store. Stack
 
    /// values are pushed onto the Store's stack in the order in which they
 
    /// appear in the value group.
 
    pub(crate) fn into_store(self, store: &mut Store) {
 
        for value in &self.values {
 
            let transferred = self.provide_value(value, store);
 
            store.stack.push(transferred);
 
        }
 
    }
 

	
 
    /// Transfers the heap values into the store, but will put the stack values
 
    /// into the provided `VecDeque`. This is mainly used to merge `ValueGroup`
 
    /// instances retrieved by the code by `get` calls into the expression
 
    /// stack.
 
    pub(crate) fn into_stack(self, stack: &mut VecDeque<Value>, store: &mut Store) {
 
        for value in &self.values {
 
            let transferred = self.provide_value(value, store);
 
            stack.push_back(transferred);
 
        }
 
    }
 

	
 
    fn provide_value(&self, value: &Value, to_store: &mut Store) -> Value {
 
        if let Some(from_heap_pos) = value.get_heap_pos() {
 
            let from_heap_pos = from_heap_pos as usize;
 
            let to_heap_pos = to_store.alloc_heap();
 
            let to_heap_pos_usize = to_heap_pos as usize;
 
            to_store.heap_regions[to_heap_pos_usize].values.reserve(self.regions[from_heap_pos].len());
 

	
 
            for value in &self.regions[from_heap_pos as usize] {
 
                let transferred = self.provide_value(value, to_store);
 
                to_store.heap_regions[to_heap_pos_usize].values.push(transferred);
 
            }
 

	
 
            return match value {
 
                Value::Message(_)    => Value::Message(to_heap_pos),
 
                Value::String(_)     => Value::String(to_heap_pos),
 
                Value::Array(_)      => Value::Array(to_heap_pos),
 
                Value::Union(tag, _) => Value::Union(*tag, to_heap_pos),
 
                Value::Struct(_)     => Value::Struct(to_heap_pos),
 
                _ => unreachable!(),
 
            };
 
        } else {
 
            return value.clone();
 
        }
 
    }
 
}
 

	
 
impl Default for ValueGroup {
 
    /// Returns an empty ValueGroup
 
    fn default() -> Self {
 
        Self { values: Vec::new(), regions: Vec::new() }
 
    }
 
}
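// Editor's sketch (illustrative, not part of this changeset): the doc comment on
// `ValueGroup` requires every heap-backed entry of `values` to refer to a valid
// index of `regions`. For example, an array value `{ 1, 2 }` handed to a newly
// created component travels as one stack slot pointing at region 0:
#[allow(dead_code)]
fn value_group_layout_example() -> ValueGroup {
    ValueGroup {
        values: vec![Value::Array(0)],                           // stack slot -> region 0
        regions: vec![vec![Value::UInt32(1), Value::UInt32(2)]], // the array's elements
    }
    // `into_store` later allocates a fresh region in the target store's heap and
    // rewrites the `Array(0)` handle to the new `HeapPos`.
}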
 

	
 
enum ValueKind { Message, String, Array }
 

	
 
pub(crate) fn apply_assignment_operator(store: &mut Store, lhs: ValueId, op: AssignmentOperator, rhs: Value) {
 
    use AssignmentOperator as AO;
 

	
 
    macro_rules! apply_int_op {
 
        ($lhs:ident, $assignment_tokens:tt, $operator:ident, $rhs:ident) => {
 
            match $lhs {
 
                Value::UInt8(v)  => { *v $assignment_tokens $rhs.as_uint8();  },
 
                Value::UInt16(v) => { *v $assignment_tokens $rhs.as_uint16(); },
 
                Value::UInt32(v) => { *v $assignment_tokens $rhs.as_uint32(); },
 
                Value::UInt64(v) => { *v $assignment_tokens $rhs.as_uint64(); },
 
                Value::SInt8(v)  => { *v $assignment_tokens $rhs.as_sint8();  },
 
                Value::SInt16(v) => { *v $assignment_tokens $rhs.as_sint16(); },
 
                Value::SInt32(v) => { *v $assignment_tokens $rhs.as_sint32(); },
 
                Value::SInt64(v) => { *v $assignment_tokens $rhs.as_sint64(); },
 
                _ => unreachable!("apply_assignment_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs),
 
            }
 
        }
 
    }
 

	
 
    let lhs = store.read_mut_ref(lhs);
 

	
 
    let mut to_dealloc = None;
 
    match op {
 
        AO::Set => {
 
            match lhs {
 
                Value::Unassigned => { *lhs = rhs; },
 
                Value::Input(v)  => { *v = rhs.as_input(); },
 
                Value::Output(v) => { *v = rhs.as_output(); },
 
                Value::Message(v)  => { to_dealloc = Some(*v); *v = rhs.as_message(); },
 
                Value::Bool(v)    => { *v = rhs.as_bool(); },
 
                Value::Char(v) => { *v = rhs.as_char(); },
 
                Value::String(v) => { *v = rhs.as_string().clone(); },
 
                Value::UInt8(v) => { *v = rhs.as_uint8(); },
 
                Value::UInt16(v) => { *v = rhs.as_uint16(); },
 
                Value::UInt32(v) => { *v = rhs.as_uint32(); },
 
                Value::UInt64(v) => { *v = rhs.as_uint64(); },
 
                Value::SInt8(v) => { *v = rhs.as_sint8(); },
 
                Value::SInt16(v) => { *v = rhs.as_sint16(); },
 
                Value::SInt32(v) => { *v = rhs.as_sint32(); },
 
                Value::SInt64(v) => { *v = rhs.as_sint64(); },
 
                Value::Array(v) => { to_dealloc = Some(*v); *v = rhs.as_array(); },
 
                Value::Enum(v) => { *v = rhs.as_enum(); },
 
                Value::Union(lhs_tag, lhs_heap_pos) => {
 
                    to_dealloc = Some(*lhs_heap_pos);
 
                    let (rhs_tag, rhs_heap_pos) = rhs.as_union();
 
                    *lhs_tag = rhs_tag;
 
                    *lhs_heap_pos = rhs_heap_pos;
 
                }
 
                Value::Struct(v) => { to_dealloc = Some(*v); *v = rhs.as_struct(); },
 
                _ => unreachable!("apply_assignment_operator {:?} on lhs {:?} and rhs {:?}", op, lhs, rhs),
 
            }
 
        },
 
        AO::Concatenated => {
 
            let lhs_heap_pos = lhs.get_heap_pos().unwrap() as usize;
 
            let rhs_heap_pos = rhs.get_heap_pos().unwrap() as usize;
 

	
 
            // To avoid borrow-checker issues, swap the heap region out for a temporary empty vector
 
            let mut total = Vec::new();
 
            std::mem::swap(&mut total, &mut store.heap_regions[lhs_heap_pos].values);
 

	
 
            // Push everything onto the swapped vector
 
            let rhs_len = store.heap_regions[rhs_heap_pos].values.len();
 
            total.reserve(rhs_len);
 
            for value_idx in 0..rhs_len {
 
                total.push(store.clone_value(store.heap_regions[rhs_heap_pos].values[value_idx].clone()));
 
            }
 

	
 
            // Swap back in place
 
            std::mem::swap(&mut total, &mut store.heap_regions[lhs_heap_pos].values);
 

	
 
            // We took ownership of the RHS, but we copied it into the LHS, so
 
            // unlike a plain assignment we need to drop the RHS heap pos.
 
            to_dealloc = Some(rhs_heap_pos as u32);
 
        },
 
        AO::Multiplied =>   { apply_int_op!(lhs, *=,  op, rhs) },
 
        AO::Divided =>      { apply_int_op!(lhs, /=,  op, rhs) },
 
        AO::Remained =>     { apply_int_op!(lhs, %=,  op, rhs) },
 
        AO::Added =>        { apply_int_op!(lhs, +=,  op, rhs) },
 
        AO::Subtracted =>   { apply_int_op!(lhs, -=,  op, rhs) },
 
        AO::ShiftedLeft =>  { apply_int_op!(lhs, <<=, op, rhs) },
 
        AO::ShiftedRight => { apply_int_op!(lhs, >>=, op, rhs) },
 
        AO::BitwiseAnded => { apply_int_op!(lhs, &=,  op, rhs) },
 
        AO::BitwiseXored => { apply_int_op!(lhs, ^=,  op, rhs) },
 
        AO::BitwiseOred =>  { apply_int_op!(lhs, |=,  op, rhs) },
 
    }
 

	
 
    if let Some(heap_pos) = to_dealloc {
 
        store.drop_heap_pos(heap_pos);
 
    }
 
}
 

	
 
pub(crate) fn apply_binary_operator(store: &mut Store, lhs: &Value, op: BinaryOperator, rhs: &Value) -> Value {
 
    use BinaryOperator as BO;
 

	
 
    macro_rules! apply_int_op_and_return_self {
 
        ($lhs:ident, $operator_tokens:tt, $operator:ident, $rhs:ident) => {
 
            return match $lhs {
 
                Value::UInt8(v)  => { Value::UInt8( *v $operator_tokens $rhs.as_uint8() ) },
 
                Value::UInt16(v) => { Value::UInt16(*v $operator_tokens $rhs.as_uint16()) },
 
                Value::UInt32(v) => { Value::UInt32(*v $operator_tokens $rhs.as_uint32()) },
 
                Value::UInt64(v) => { Value::UInt64(*v $operator_tokens $rhs.as_uint64()) },
 
                Value::SInt8(v)  => { Value::SInt8( *v $operator_tokens $rhs.as_sint8() ) },
 
                Value::SInt16(v) => { Value::SInt16(*v $operator_tokens $rhs.as_sint16()) },
 
                Value::SInt32(v) => { Value::SInt32(*v $operator_tokens $rhs.as_sint32()) },
 
                Value::SInt64(v) => { Value::SInt64(*v $operator_tokens $rhs.as_sint64()) },
 
                _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs)
 
            };
 
        }
 
    }
 

	
 
    macro_rules! apply_int_op_and_return_bool {
 
        ($lhs:ident, $operator_tokens:tt, $operator:ident, $rhs:ident) => {
 
            return match $lhs {
 
                Value::UInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_uint8() ) },
 
                Value::UInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint16()) },
 
                Value::UInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint32()) },
 
                Value::UInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint64()) },
 
                Value::SInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_sint8() ) },
 
                Value::SInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint16()) },
 
                Value::SInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint32()) },
 
                Value::SInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint64()) },
 
                _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs)
 
            };
 
        }
 
    }
 

	
 
    // We need to handle concatenate in a special way because it needs the store
 
    // mutably.
 
    if op == BO::Concatenate {
 
        let target_heap_pos = store.alloc_heap();
 
        let lhs_heap_pos;
 
        let rhs_heap_pos;
 

	
 
        let lhs = store.maybe_read_ref(lhs);
 
        let rhs = store.maybe_read_ref(rhs);
 

	
 
        let value_kind;
 

	
 
        match lhs {
 
            Value::Message(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_message();
 
                value_kind = ValueKind::Message;
 
            },
 
            Value::String(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_string();
 
                value_kind = ValueKind::String;
 
            },
 
            Value::Array(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_array();
 
                value_kind = ValueKind::Array;
 
            },
 
            _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", op, lhs, rhs)
 
        }
 

	
 
        let lhs_heap_pos = lhs_heap_pos as usize;
 
        let rhs_heap_pos = rhs_heap_pos as usize;
 

	
 
        let mut concatenated = Vec::new();
 
        let lhs_len = store.heap_regions[lhs_heap_pos].values.len();
 
        let rhs_len = store.heap_regions[rhs_heap_pos].values.len();
 
        concatenated.reserve(lhs_len + rhs_len);
 
        for idx in 0..lhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[lhs_heap_pos].values[idx].clone()));
 
        }
 
        for idx in 0..rhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[rhs_heap_pos].values[idx].clone()));
 
        }
 

	
 
        store.heap_regions[target_heap_pos as usize].values = concatenated;
 

	
 
        return match value_kind{
 
            ValueKind::Message => Value::Message(target_heap_pos),
 
            ValueKind::String => Value::String(target_heap_pos),
 
            ValueKind::Array => Value::Array(target_heap_pos),
 
        };
 
    }
 

	
 
    // If any of the values are references, retrieve the thing they're referring
 
    // to.
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    match op {
 
        BO::Concatenate => unreachable!(),
 
        BO::LogicalOr => {
 
            return Value::Bool(lhs.as_bool() || rhs.as_bool());
 
        },
 
        BO::LogicalAnd => {
 
            return Value::Bool(lhs.as_bool() && rhs.as_bool());
 
        },
 
        BO::BitwiseOr        => { apply_int_op_and_return_self!(lhs, |,  op, rhs); },
 
        BO::BitwiseXor       => { apply_int_op_and_return_self!(lhs, ^,  op, rhs); },
 
        BO::BitwiseAnd       => { apply_int_op_and_return_self!(lhs, &,  op, rhs); },
 
        BO::Equality         => { Value::Bool(apply_equality_operator(store, lhs, rhs)) },
 
        BO::Inequality       => { Value::Bool(apply_inequality_operator(store, lhs, rhs)) },
 
        BO::LessThan         => { apply_int_op_and_return_bool!(lhs, <,  op, rhs); },
 
        BO::GreaterThan      => { apply_int_op_and_return_bool!(lhs, >,  op, rhs); },
 
        BO::LessThanEqual    => { apply_int_op_and_return_bool!(lhs, <=, op, rhs); },
 
        BO::GreaterThanEqual => { apply_int_op_and_return_bool!(lhs, >=, op, rhs); },
 
        BO::ShiftLeft        => { apply_int_op_and_return_self!(lhs, <<, op, rhs); },
 
        BO::ShiftRight       => { apply_int_op_and_return_self!(lhs, >>, op, rhs); },
 
        BO::Add              => { apply_int_op_and_return_self!(lhs, +,  op, rhs); },
 
        BO::Subtract         => { apply_int_op_and_return_self!(lhs, -,  op, rhs); },
 
        BO::Multiply         => { apply_int_op_and_return_self!(lhs, *,  op, rhs); },
 
        BO::Divide           => { apply_int_op_and_return_self!(lhs, /,  op, rhs); },
 
        BO::Remainder        => { apply_int_op_and_return_self!(lhs, %,  op, rhs); }
 
    }
 
}
 

	
 
pub(crate) fn apply_unary_operator(store: &mut Store, op: UnaryOperator, value: &Value) -> Value {
 
    use UnaryOperator as UO;
 

	
 
    macro_rules! apply_int_expr_and_return {
 
        ($value:ident, $apply:tt, $op:ident) => {
 
            return match $value {
 
                Value::UInt8(v)  => Value::UInt8($apply *v),
 
                Value::UInt16(v) => Value::UInt16($apply *v),
 
                Value::UInt32(v) => Value::UInt32($apply *v),
 
                Value::UInt64(v) => Value::UInt64($apply *v),
 
                Value::SInt8(v)  => Value::SInt8($apply *v),
 
                Value::SInt16(v) => Value::SInt16($apply *v),
 
                Value::SInt32(v) => Value::SInt32($apply *v),
 
                Value::SInt64(v) => Value::SInt64($apply *v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", $op, $value),
 
            };
 
        }
 
    }
 

	
 
    // If the value is a reference, retrieve the thing it is referring to
 
    let value = store.maybe_read_ref(value);
 

	
 
    match op {
 
        UO::Positive => {
 
            debug_assert!(value.is_integer());
 
            return value.clone();
 
        },
 
        UO::Negative => {
 
            // TODO: Error on negating unsigned integers
 
            return match value {
 
                Value::SInt8(v) => Value::SInt8(-*v),
 
                Value::SInt16(v) => Value::SInt16(-*v),
 
                Value::SInt32(v) => Value::SInt32(-*v),
 
                Value::SInt64(v) => Value::SInt64(-*v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", op, value),
 
            }
 
        },
 
        UO::BitwiseNot => { apply_int_expr_and_return!(value, !, op)},
 
        UO::LogicalNot => { return Value::Bool(!value.as_bool()); },
 
    }
 
}
 

	
 
pub(crate) fn apply_casting(store: &mut Store, output_type: &ConcreteType, subject: &Value) -> Result<Value, String> {
 
    // To simplify the casting logic: if the output type is not a simple
 
    // integer/boolean/character, then the type checker made sure that the two
 
    // types must be equal, hence we can do a simple clone.
 
    use ConcreteTypePart as CTP;
 
    let part = &output_type.parts[0];
 
    match part {
 
        CTP::Bool | CTP::Character |
 
        CTP::UInt8 | CTP::UInt16 | CTP::UInt32 | CTP::UInt64 |
 
        CTP::SInt8 | CTP::SInt16 | CTP::SInt32 | CTP::SInt64 => {
 
            // Do the checking of these below
 
            debug_assert_eq!(output_type.parts.len(), 1);
 
        },
 
        _ => {
 
            return Ok(store.clone_value(subject.clone()));
 
        },
 
    }
 

	
 
    // Note: character is not included, needs per-type checking
 
    macro_rules! unchecked_cast {
 
        ($input: expr, $output_part: expr) => {
 
            return Ok(match $output_part {
 
                CTP::UInt8 => Value::UInt8($input as u8),
 
                CTP::UInt16 => Value::UInt16($input as u16),
 
                CTP::UInt32 => Value::UInt32($input as u32),
 
                CTP::UInt64 => Value::UInt64($input as u64),
 
                CTP::SInt8 => Value::SInt8($input as i8),
 
                CTP::SInt16 => Value::SInt16($input as i16),
 
                CTP::SInt32 => Value::SInt32($input as i32),
 
                CTP::SInt64 => Value::SInt64($input as i64),
 
                _ => unreachable!()
 
            })
 
        }
 
    }
 

	
 
    macro_rules! from_unsigned_cast {
 
        ($input:expr, $input_type:ty, $output_part:expr) => {
 
            {
 
                let target_type_name = match $output_part {
 
                    CTP::Bool => return Ok(Value::Bool($input != 0)),
 
                    CTP::Character => if $input <= u8::MAX as $input_type {
 
                        return Ok(Value::Char(($input as u8) as char))
 
                    } else {
 
                        KW_TYPE_CHAR_STR
 
                    },
 
                    CTP::UInt8 => if $input <= u8::MAX as $input_type {
 
                        return Ok(Value::UInt8($input as u8))
 
                    } else {
 
                        KW_TYPE_UINT8_STR
 
                    },
 
                    CTP::UInt16 => if $input <= u16::MAX as $input_type {
 
                        return Ok(Value::UInt16($input as u16))
 
                    } else {
 
                        KW_TYPE_UINT16_STR
 
                    },
 
                    CTP::UInt32 => if $input <= u32::MAX as $input_type {
 
                        return Ok(Value::UInt32($input as u32))
 
                    } else {
 
                        KW_TYPE_UINT32_STR
 
                    },
 
                    CTP::UInt64 => return Ok(Value::UInt64($input as u64)), // any unsigned int to u64 is fine
 
                    CTP::SInt8 => if $input <= i8::MAX as $input_type {
 
                        return Ok(Value::SInt8($input as i8))
 
                    } else {
 
                        KW_TYPE_SINT8_STR
 
                    },
 
                    CTP::SInt16 => if $input <= i16::MAX as $input_type {
 
                        return Ok(Value::SInt16($input as i16))
 
                    } else {
 
                        KW_TYPE_SINT16_STR
 
                    },
 
                    CTP::SInt32 => if $input <= i32::MAX as $input_type {
 
                        return Ok(Value::SInt32($input as i32))
 
                    } else {
 
                        KW_TYPE_SINT32_STR
 
                    },
 
                    CTP::SInt64 => if $input <= i64::MAX as $input_type {
 
                        return Ok(Value::SInt64($input as i64))
 
                    } else {
 
                        KW_TYPE_SINT64_STR
 
                    },
 
                    _ => unreachable!(),
 
                };
 

	
 
                return Err(format!("value is '{}' which doesn't fit in a type '{}'", $input, target_type_name));
 
            }
 
        }
 
    }
 

	
 
    macro_rules! from_signed_cast {
 
        // Programmer note: for signed checking we cannot do
 
        //  output_type::MAX as input_type,
 
        //
 
        // because if the output type's width is larger than the input type,
 
        // then the cast results in a negative number. So we mask with the
 
        // maximum possible value the input type can become. As in:
 
        //  (output_type::MAX as input_type) & input_type::MAX
 
        //
 
        // This way:
 
        // 1. output width is larger than input width: fine in all cases, we
 
        //  simply compare against the max input value, which is always true.
 
        // 2. output width is equal to input width: by masking we "remove the
 
        //  signed bit from the unsigned number" and again compare against the
 
        //  maximum input value.
 
        // 3. output width is smaller than the input width: masking does nothing
 
        //  because the signed bit is never set, and we simply compare against
 
        //  the maximum possible output value.
 
        //
 
        // A similar kind of mechanism for the minimum value, but here we do
 
        // a binary OR. We do a:
 
        //  (output_type::MIN as input_type) | input_type::MIN
 
        //
 
        // This way:
 
        // 1. output width is larger than input width: initial cast truncates to
 
        //  0, then we OR with the actual minimum value, so we attain the
 
        //  minimum value of the input type.
 
        // 2. output width is equal to input width: we OR the minimum value with
 
        //  itself.
 
        // 3. output width is smaller than input width: the cast produces the
 
        //  min value of the output type, the subsequent OR does nothing, as it
 
        //  essentially just sets the signed bit (which must already be set,
 
        //  since we're dealing with a signed minimum value)
 
        //
 
        // After all of this expanding, we simply hope the compiler does a best
 
        // effort constant expression evaluation, and presto!
 
        ($input:expr, $input_type:ty, $output_type:expr) => {
 
            {
 
                let target_type_name = match $output_type {
 
                    CTP::Bool => return Ok(Value::Bool($input != 0)),
 
                    CTP::Character => if $input >= 0 && $input <= (u8::max as $input_type & <$input_type>::MAX) {
 
                        return Ok(Value::Char(($input as u8) as char))
 
                    } else {
 
                        KW_TYPE_CHAR_STR
 
                    },
 
                    CTP::UInt8 => if $input >= 0 && $input <= ((u8::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::UInt8($input as u8));
 
                    } else {
 
                        KW_TYPE_UINT8_STR
 
                    },
 
                    CTP::UInt16 => if $input >= 0 && $input <= ((u16::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::UInt16($input as u16));
 
                    } else {
 
                        KW_TYPE_UINT16_STR
 
                    },
 
                    CTP::UInt32 => if $input >= 0 && $input <= ((u32::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::UInt32($input as u32));
 
                    } else {
 
                        KW_TYPE_UINT32_STR
 
                    },
 
                    CTP::UInt64 => if $input >= 0 && $input <= ((u64::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::UInt64($input as u64));
 
                    } else {
 
                        KW_TYPE_UINT64_STR
 
                    },
 
                    CTP::SInt8 => if $input >= ((i8::MIN as $input_type) | <$input_type>::MIN) && $input <= ((i8::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::SInt8($input as i8));
 
                    } else {
 
                        KW_TYPE_SINT8_STR
 
                    },
 
                    CTP::SInt16 => if $input >= ((i16::MIN as $input_type | <$input_type>::MIN)) && $input <= ((i16::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::SInt16($input as i16));
 
                    } else {
 
                        KW_TYPE_SINT16_STR
 
                    },
 
                    CTP::SInt32 => if $input >= ((i32::MIN as $input_type | <$input_type>::MIN)) && $input <= ((i32::MAX as $input_type) & <$input_type>::MAX) {
 
                        return Ok(Value::SInt32($input as i32));
 
                    } else {
 
                        KW_TYPE_SINT32_STR
 
                    },
 
                    CTP::SInt64 => return Ok(Value::SInt64($input as i64)),
 
                    _ => unreachable!(),
 
                };
 

	
 
                return Err(format!("value is '{}' which doesn't fit in a type '{}'", $input, target_type_name));
 
            }
 
        }
 
    }
 

	
 
    // If here, then the types might still be equal, but at least we're dealing
 
    // with a simple integer/boolean/character input and output type.
 
    let subject = store.maybe_read_ref(subject);
 
    match subject {
 
        Value::Bool(val) => {
 
            match part {
 
                CTP::Bool => return Ok(Value::Bool(*val)),
 
                CTP::Character => return Ok(Value::Char(1 as char)),
 
                _ => unchecked_cast!(*val, part),
 
            }
 
        },
 
        Value::Char(val) => {
 
            match part {
 
                CTP::Bool => return Ok(Value::Bool(*val != 0 as char)),
 
                CTP::Character => return Ok(Value::Char(*val)),
 
                _ => unchecked_cast!(*val, part),
 
            }
 
        },
 
        Value::UInt8(val) => from_unsigned_cast!(*val, u8, part),
 
        Value::UInt16(val) => from_unsigned_cast!(*val, u16, part),
 
        Value::UInt32(val) => from_unsigned_cast!(*val, u32, part),
 
        Value::UInt64(val) => from_unsigned_cast!(*val, u64, part),
 
        Value::SInt8(val) => from_signed_cast!(*val, i8, part),
 
        Value::SInt16(val) => from_signed_cast!(*val, i16, part),
 
        Value::SInt32(val) => from_signed_cast!(*val, i32, part),
 
        Value::SInt64(val) => from_signed_cast!(*val, i64, part),
 
        _ => unreachable!("mismatch between 'cast' type checking and 'cast' evaluation"),
 
    }
 
}
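// Editor's sketch (illustrative, not part of this changeset): the masking trick
// documented inside `from_signed_cast` folds to plain range checks once the
// constants are evaluated. A few concrete instances:
#[cfg(test)]
mod cast_mask_sketch {
    #[test]
    fn mask_identities_hold() {
        // Output wider than the input (i8 value cast to i32): both bounds collapse
        // to the input type's own range, so the check always passes.
        assert_eq!((i32::MAX as i8) & i8::MAX, i8::MAX);
        assert_eq!((i32::MIN as i8) | i8::MIN, i8::MIN);
        // Output narrower than the input (i64 value cast to i8): the bounds become
        // the output type's range, expressed in the input type.
        assert_eq!((i8::MAX as i64) & i64::MAX, i8::MAX as i64);
        assert_eq!((i8::MIN as i64) | i64::MIN, i8::MIN as i64);
    }
}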
 

	
 
/// Recursively checks for equality.
 
pub(crate) fn apply_equality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_equality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return false;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if !apply_equality_operator(store, lhs_val, rhs_val) {
 
                return false;
 
            }
 
        }
 

	
 
        return true;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v == rhs.as_input(),
 
        Value::Output(v) => *v == rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v == rhs.as_bool(),
 
        Value::Char(v) => *v == rhs.as_char(),
 
        Value::String(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v == rhs.as_uint8(),
 
        Value::UInt16(v) => *v == rhs.as_uint16(),
 
        Value::UInt32(v) => *v == rhs.as_uint32(),
 
        Value::UInt64(v) => *v == rhs.as_uint64(),
 
        Value::SInt8(v) => *v == rhs.as_sint8(),
 
        Value::SInt16(v) => *v == rhs.as_sint16(),
 
        Value::SInt32(v) => *v == rhs.as_sint32(),
 
        Value::SInt64(v) => *v == rhs.as_sint64(),
 
        Value::Array(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_array()),
 
        Value::Enum(v) => *v == rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return false;
 
            }
 
            eval_equality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_equality_operator to lhs {:?}", lhs),
 
    }
 
}
 

	
 
/// Recursively checks for inequality
 
pub(crate) fn apply_inequality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_inequality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return true;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if apply_inequality_operator(store, lhs_val, rhs_val) {
 
                return true;
 
            }
 
        }
 

	
 
        return false;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v != rhs.as_input(),
 
        Value::Output(v) => *v != rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v != rhs.as_bool(),
 
        Value::Char(v) => *v != rhs.as_char(),
 
        Value::String(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v != rhs.as_uint8(),
 
        Value::UInt16(v) => *v != rhs.as_uint16(),
 
        Value::UInt32(v) => *v != rhs.as_uint32(),
 
        Value::UInt64(v) => *v != rhs.as_uint64(),
 
        Value::SInt8(v) => *v != rhs.as_sint8(),
 
        Value::SInt16(v) => *v != rhs.as_sint16(),
 
        Value::SInt32(v) => *v != rhs.as_sint32(),
 
        Value::SInt64(v) => *v != rhs.as_sint64(),
 
        Value::Array(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_array()),
 
        Value::Enum(v) => *v != rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return true;
 
            }
 
            eval_inequality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_inequality_operator to lhs {:?}", lhs)
 
    }
 
}
 

	
 
/// Recursively applies binding operator. Essentially an equality operator with
 
/// special handling if the LHS contains a binding reference to a stack
 
/// variable.
 
// Note that there is a lot of `Value.clone()` going on here. As always, this
 
// is potentially cloning the references to heap values, not actually cloning
 
// those heap regions into a new heap region.
 
pub(crate) fn apply_binding_operator(store: &mut Store, lhs: Value, rhs: Value) -> bool {
 
    let lhs = store.maybe_read_ref(&lhs).clone();
 
    let rhs = store.maybe_read_ref(&rhs).clone();
 

	
 
    fn eval_binding_heap(store: &mut Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_len = store.heap_regions[lhs_pos as usize].values.len();
 
        let rhs_len = store.heap_regions[rhs_pos as usize].values.len();
 
        if lhs_len != rhs_len {
 
            return false;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            // Cloning here to satisfy the borrow checker; acceptable because
 
            // this is just a temporary evaluator implementation.
 
            let lhs_val = store.heap_regions[lhs_pos as usize].values[idx].clone();
 
            let rhs_val = store.heap_regions[rhs_pos as usize].values[idx].clone();
 
            if !apply_binding_operator(store, lhs_val, rhs_val) {
 
                return false;
 
            }
 
        }
 

	
 
        return true;
 
    }
 

	
 
    match lhs {
 
        Value::Binding(var_pos) => {
 
            let to_write = store.clone_value(rhs.clone());
 
            store.write(ValueId::Stack(var_pos), to_write);
 
            return true;
 
        },
 
        Value::Input(v) => v == rhs.as_input(),
 
        Value::Output(v) => v == rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_binding_heap(store, lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => v == rhs.as_bool(),
 
        Value::Char(v) => v == rhs.as_char(),
 
        Value::String(lhs_pos) => eval_binding_heap(store, lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => v == rhs.as_uint8(),
 
        Value::UInt16(v) => v == rhs.as_uint16(),
 
        Value::UInt32(v) => v == rhs.as_uint32(),
 
        Value::UInt64(v) => v == rhs.as_uint64(),
 
        Value::SInt8(v) => v == rhs.as_sint8(),
 
        Value::SInt16(v) => v == rhs.as_sint16(),
 
        Value::SInt32(v) => v == rhs.as_sint32(),
 
        Value::SInt64(v) => v == rhs.as_sint64(),
 
        Value::Array(lhs_pos) => eval_binding_heap(store, lhs_pos, rhs.as_array()),
 
        Value::Enum(v) => v == rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if lhs_tag != rhs_tag {
 
                return false;
 
            }
 
            eval_binding_heap(store, lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_binding_heap(store, lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_binding_operator to lhs {:?}", lhs),
 
    }
 
}
 
\ No newline at end of file
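// A minimal standalone sketch (using stand-in types, not the crate's `Store`
// or `Value`) of the idea behind `apply_binding_operator` above: matching
// behaves like equality, except that a `Binding` placeholder on the LHS
// captures the RHS value into a variable slot instead of comparing.
#[derive(Clone, PartialEq)]
enum SketchValue { UInt8(u8), Binding(usize) }

fn bind_or_compare(slots: &mut [Option<SketchValue>], lhs: &SketchValue, rhs: &SketchValue) -> bool {
    match lhs {
        // A binding always matches and stores what it captured.
        SketchValue::Binding(slot) => { slots[*slot] = Some(rhs.clone()); true },
        // Everything else is a plain equality check.
        other => other == rhs,
    }
}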
src/protocol/mod.rs
Show inline comments
 
mod arena;
 
pub(crate) mod eval;
 
pub(crate) mod input_source;
 
mod parser;
 
#[cfg(test)] mod tests;
 

	
 
pub(crate) mod ast;
 
pub(crate) mod ast_printer;
 

	
 
use std::sync::Mutex;
 

	
 
use crate::collections::{StringPool, StringRef};
 
use crate::common::*;
 
use crate::protocol::ast::*;
 
use crate::protocol::eval::*;
 
use crate::protocol::input_source::*;
 
use crate::protocol::parser::*;
 
use crate::protocol::type_table::*;
 

	
 
/// A protocol description module
 
pub struct Module {
 
    pub(crate) source: InputSource,
 
    pub(crate) root_id: RootId,
 
    pub(crate) name: Option<StringRef<'static>>,
 
}
 
/// Description of a protocol object, used to configure new connectors.
 
#[repr(C)]
 
pub struct ProtocolDescription {
 
    pub(crate) modules: Vec<Module>,
 
    pub(crate) heap: Heap,
 
    pub(crate) types: TypeTable,
 
    pub(crate) pool: Mutex<StringPool>,
 
}
 
#[derive(Debug, Clone)]
 
pub(crate) struct ComponentState {
 
    pub(crate) prompt: Prompt,
 
}
 

	
 
#[allow(dead_code)]
 
pub(crate) enum EvalContext<'a> {
 
    Nonsync(&'a mut NonsyncProtoContext<'a>),
 
    Sync(&'a mut SyncProtoContext<'a>),
 
    None,
 
}
 
//////////////////////////////////////////////
 

	
 
#[derive(Debug)]
 
pub enum ComponentCreationError {
 
    ModuleDoesntExist,
 
    DefinitionDoesntExist,
 
    DefinitionNotComponent,
 
    InvalidNumArguments,
 
    InvalidArgumentType(usize),
 
    UnownedPort,
 
    InSync,
 
}
 

	
 
impl std::fmt::Debug for ProtocolDescription {
 
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
 
        write!(f, "(An opaque protocol description)")
 
    }
 
}
 
impl ProtocolDescription {
 
    pub fn parse(buffer: &[u8]) -> Result<Self, String> {
 
        let source = InputSource::new(String::new(), Vec::from(buffer));
 
        let mut parser = Parser::new();
 
        parser.feed(source).expect("failed to feed source");
 
        
 
        if let Err(err) = parser.parse() {
 
            println!("ERROR:\n{}", err);
 
            return Err(format!("{}", err))
 
        }
 

	
 
        debug_assert_eq!(parser.modules.len(), 1, "only supporting one module here for now");
 
        let modules: Vec<Module> = parser.modules.into_iter()
 
            .map(|module| Module{
 
                source: module.source,
 
                root_id: module.root_id,
 
                name: module.name.map(|(_, name)| name)
 
            })
 
            .collect();
 

	
 
        return Ok(ProtocolDescription {
 
            modules,
 
            heap: parser.heap,
 
            types: parser.type_table,
 
            pool: Mutex::new(parser.string_pool),
 
        });
 
    }
 

	
 
    #[deprecated]
 
    pub(crate) fn component_polarities(
 
        &self,
 
        module_name: &[u8],
 
        identifier: &[u8],
 
    ) -> Result<Vec<Polarity>, AddComponentError> {
 
        use AddComponentError::*;
 

	
 
        let module_root = self.lookup_module_root(module_name);
 
        if module_root.is_none() {
 
            return Err(AddComponentError::NoSuchModule);
 
        }
 
        let module_root = module_root.unwrap();
 

	
 
        let root = &self.heap[module_root];
 
        let def = root.get_definition_ident(&self.heap, identifier);
 
        if def.is_none() {
 
            return Err(NoSuchComponent);
 
        }
 

	
 
        let def = &self.heap[def.unwrap()];
 
        if !def.is_component() {
 
            return Err(NoSuchComponent);
 
        }
 

	
 
        for &param in def.parameters().iter() {
 
            let param = &self.heap[param];
 
            let first_element = &param.parser_type.elements[0];
 

	
 
            match first_element.variant {
 
                ParserTypeVariant::Input | ParserTypeVariant::Output => continue,
 
                _ => {
 
                    return Err(NonPortTypeParameters);
 
                }
 
            }
 
        }
 

	
 
        let mut result = Vec::new();
 
        for &param in def.parameters().iter() {
 
            let param = &self.heap[param];
 
            let first_element = &param.parser_type.elements[0];
 

	
 
            if first_element.variant == ParserTypeVariant::Input {
 
                result.push(Polarity::Getter)
 
            } else if first_element.variant == ParserTypeVariant::Output {
 
                result.push(Polarity::Putter)
 
            } else {
 
                unreachable!()
 
            }
 
        }
 
        Ok(result)
 
    }
 

	
 
    // expects port polarities to be correct
 
    #[deprecated]
 
    pub(crate) fn new_component(&self, module_name: &[u8], identifier: &[u8], ports: &[PortId]) -> ComponentState {
 
        let mut args = Vec::new();
 
        for (&x, y) in ports.iter().zip(self.component_polarities(module_name, identifier).unwrap()) {
 
            match y {
 
                Polarity::Getter => args.push(Value::Input(x)),
 
                Polarity::Putter => args.push(Value::Output(x)),
 
            }
 
        }
 

	
 
        let module_root = self.lookup_module_root(module_name).unwrap();
 
        let root = &self.heap[module_root];
 
        let def = root.get_definition_ident(&self.heap, identifier).unwrap();
 

	
 
        ComponentState { prompt: Prompt::new(&self.types, &self.heap, def, 0, ValueGroup::new_stack(args)) }
 
    }
 

	
 
    // TODO: Of course, rename this at some point, perhaps even remove it in its
 
    //  entirety. Find some way to interface with the parameters' types.
 
    pub(crate) fn new_component_v2(
 
    pub(crate) fn new_component(
 
        &self, module_name: &[u8], identifier: &[u8], arguments: ValueGroup
 
    ) -> Result<Prompt, ComponentCreationError> {
 
        // Find the module in which the definition can be found
 
        let module_root = self.lookup_module_root(module_name);
 
        if module_root.is_none() {
 
            return Err(ComponentCreationError::ModuleDoesntExist);
 
        }
 
        let module_root = module_root.unwrap();
 

	
 
        let root = &self.heap[module_root];
 
        let definition_id = root.get_definition_ident(&self.heap, identifier);
 
        if definition_id.is_none() {
 
            return Err(ComponentCreationError::DefinitionDoesntExist);
 
        }
 
        let definition_id = definition_id.unwrap();
 

	
 
        let definition = &self.heap[definition_id];
 
        if !definition.is_component() {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // Make sure that the types of the provided value group match
 
        // the expected types.
 
        let definition = definition.as_component();
 
        if !definition.poly_vars.is_empty() {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // - check number of arguments
 
        let expr_data = self.types.get_procedure_expression_data(&definition_id, 0);
 
        if expr_data.arg_types.len() != arguments.values.len() {
 
            return Err(ComponentCreationError::InvalidNumArguments);
 
        }
 

	
 
        // - for each argument try to make sure the types match
 
        for arg_idx in 0..arguments.values.len() {
 
            let expected_type = &expr_data.arg_types[arg_idx];
 
            let provided_value = &arguments.values[arg_idx];
 
            if !self.verify_same_type(expected_type, 0, &arguments, provided_value) {
 
                return Err(ComponentCreationError::InvalidArgumentType(arg_idx));
 
            }
 
        }
 

	
 
        // By now we're sure that all of the arguments are correct. So create
 
        // the connector.
 
        return Ok(Prompt::new(&self.types, &self.heap, definition_id, 0, arguments));
 
    }
 

	
 
    fn lookup_module_root(&self, module_name: &[u8]) -> Option<RootId> {
 
        for module in self.modules.iter() {
 
            match &module.name {
 
                Some(name) => if name.as_bytes() == module_name {
 
                    return Some(module.root_id);
 
                },
 
                None => if module_name.is_empty() {
 
                    return Some(module.root_id);
 
                }
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    fn verify_same_type(&self, expected: &ConcreteType, expected_idx: usize, arguments: &ValueGroup, argument: &Value) -> bool {
 
        use ConcreteTypePart as CTP;
 

	
 
        match &expected.parts[expected_idx] {
 
            CTP::Void | CTP::Message | CTP::Slice | CTP::Function(_, _) | CTP::Component(_, _) => unreachable!(),
 
            CTP::Bool => if let Value::Bool(_) = argument { true } else { false },
 
            CTP::UInt8 => if let Value::UInt8(_) = argument { true } else { false },
 
            CTP::UInt16 => if let Value::UInt16(_) = argument { true } else { false },
 
            CTP::UInt32 => if let Value::UInt32(_) = argument { true } else { false },
 
            CTP::UInt64 => if let Value::UInt64(_) = argument { true } else { false },
 
            CTP::SInt8 => if let Value::SInt8(_) = argument { true } else { false },
 
            CTP::SInt16 => if let Value::SInt16(_) = argument { true } else { false },
 
            CTP::SInt32 => if let Value::SInt32(_) = argument { true } else { false },
 
            CTP::SInt64 => if let Value::SInt64(_) = argument { true } else { false },
 
            CTP::Character => if let Value::Char(_) = argument { true } else { false },
 
            CTP::String => {
 
                // Match outer string type and embedded character types
 
                if let Value::String(heap_pos) = argument {
 
                    for element in &arguments.regions[*heap_pos as usize] {
 
                        if let Value::Char(_) = element {} else {
 
                            return false;
 
                        }
 
                    }
 
                } else {
 
                    return false;
 
                }
 

	
 
                return true;
 
            },
 
            CTP::Array => {
 
                if let Value::Array(heap_pos) = argument {
 
                    let heap_pos = *heap_pos;
 
                    for element in &arguments.regions[heap_pos as usize] {
 
                        if !self.verify_same_type(expected, expected_idx + 1, arguments, element) {
 
                            return false;
 
                        }
 
                    }
 
                    return true;
 
                } else {
 
                    return false;
 
                }
 
            },
 
            CTP::Input => if let Value::Input(_) = argument { true } else { false },
 
            CTP::Output => if let Value::Output(_) = argument { true } else { false },
 
            CTP::Instance(definition_id, _num_embedded) => {
 
                let definition = self.types.get_base_definition(definition_id).unwrap();
 
                match &definition.definition {
 
                    DefinedTypeVariant::Enum(definition) => {
 
                        if let Value::Enum(variant_value) = argument {
 
                            let is_valid = definition.variants.iter()
 
                                .any(|v| v.value == *variant_value);
 
                            return is_valid;
 
                        }
 
                    },
 
                    _ => todo!("implement full type checking on user-supplied arguments"),
 
                }
 

	
 
                return false;
 
            },
 
        }
 
    }
 
}
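// Hedged, crate-internal usage sketch of the API above: parse a protocol
// description and instantiate a component that takes no arguments. The
// component name is hypothetical, the call assumes the `ValueGroup`-based
// `new_component` shown above, and error handling is abbreviated.
fn sketch_instantiate(source: &[u8]) -> Result<Prompt, String> {
    let pd = ProtocolDescription::parse(source)?;
    // An empty module name selects the unnamed module (see `lookup_module_root`).
    pd.new_component(b"", b"some_component", ValueGroup::new_stack(Vec::new()))
        .map_err(|err| format!("component creation failed: {:?}", err))
}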
 

	
 
pub trait RunContext {
 
    fn performed_put(&mut self, port: PortId) -> bool;
 
    fn performed_get(&mut self, port: PortId) -> Option<ValueGroup>; // None if still waiting on message
 
    fn fires(&mut self, port: PortId) -> Option<Value>; // None if not yet branched
 
    fn performed_fork(&mut self) -> Option<bool>; // None if not yet forked
 
    fn created_channel(&mut self) -> Option<(Value, Value)>; // None if not yet prepared
 
}
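// A hedged sketch of the smallest possible `RunContext`: a context that never
// has a prepared result. It only illustrates the trait's contract; the real
// implementations live below and in the runtime's connector code.
struct NoOpRunContext;

impl RunContext for NoOpRunContext {
    fn performed_put(&mut self, _port: PortId) -> bool { false }
    fn performed_get(&mut self, _port: PortId) -> Option<ValueGroup> { None }
    fn fires(&mut self, _port: PortId) -> Option<Value> { None }
    fn performed_fork(&mut self) -> Option<bool> { None }
    fn created_channel(&mut self) -> Option<(Value, Value)> { None }
}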
 

	
 
#[derive(Debug)]
 
pub enum RunResult {
 
    // Can only occur outside sync blocks
 
    ComponentTerminated, // component has exited its procedure
 
    ComponentAtSyncStart,
 
    NewComponent(DefinitionId, i32, ValueGroup), // should also be possible inside sync
 
    NewChannel, // should also be possible inside sync
 
    // Can only occur inside sync blocks
 
    BranchInconsistent, // branch has inconsistent behaviour
 
    BranchMissingPortState(PortId), // branch doesn't know about port firing
 
    BranchGet(PortId), // branch hasn't received message on input port yet
 
    BranchAtSyncEnd,
 
    BranchFork,
 
    BranchPut(PortId, ValueGroup),
 
}
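// Hedged sketch of how a driver loop might react to these variants; the real
// handling lives in the scheduler/connector code, this only shows the shape.
fn sketch_handle_run_result(result: RunResult) {
    match result {
        RunResult::ComponentTerminated => { /* tear the component down */ },
        RunResult::ComponentAtSyncStart => { /* begin a new sync round */ },
        RunResult::BranchPut(_port, _values) => { /* wrap the values and send them */ },
        RunResult::BranchGet(_port) => { /* park the branch until a message arrives */ },
        _ => { /* remaining variants: forks, channels, new components, errors */ },
    }
}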
 

	
 
impl ComponentState {
 
    pub(crate) fn run(&mut self, ctx: &mut impl RunContext, pd: &ProtocolDescription) -> RunResult {
 
        use EvalContinuation as EC;
 
        use RunResult as RR;
 

	
 
        loop {
 
            let step_result = self.prompt.step(&pd.types, &pd.heap, &pd.modules, ctx);
 
            match step_result {
 
                Err(reason) => {
 
                    println!("Evaluation error:\n{}", reason);
 
                    todo!("proper error handling/bubbling up");
 
                },
 
                Ok(continuation) => match continuation {
 
                    EC::Stepping => continue,
 
                    EC::BranchInconsistent => return RR::BranchInconsistent,
 
                    EC::ComponentTerminated => return RR::ComponentTerminated,
 
                    EC::SyncBlockStart => return RR::ComponentAtSyncStart,
 
                    EC::SyncBlockEnd => return RR::BranchAtSyncEnd,
 
                    EC::NewComponent(definition_id, monomorph_idx, args) =>
 
                        return RR::NewComponent(definition_id, monomorph_idx, args),
 
                    EC::NewChannel =>
 
                        return RR::NewChannel,
 
                    EC::NewFork =>
 
                        return RR::BranchFork,
 
                    EC::BlockFires(port_id) => return RR::BranchMissingPortState(port_id),
 
                    EC::BlockGet(port_id) => return RR::BranchGet(port_id),
 
                    EC::Put(port_id, value_group) => {
 
                        return RR::BranchPut(port_id, value_group);
 
                    },
 
                }
 
            }
 
        }
 
    }
 
}
 

	
 
// TODO: @remove the old stuff
 
impl ComponentState {
 
    pub(crate) fn nonsync_run<'a: 'b, 'b>(
 
        &'a mut self,
 
        context: &'b mut NonsyncProtoContext<'b>,
 
        pd: &'a ProtocolDescription,
 
    ) -> NonsyncBlocker {
 
        let mut context = EvalContext::Nonsync(context);
 
        loop {
 
            let result = self.prompt.step(&pd.types, &pd.heap, &pd.modules, &mut context);
 
            match result {
 
                Err(err) => {
 
                    println!("Evaluation error:\n{}", err);
 
                    panic!("proper error handling when component fails");
 
                },
 
                Ok(cont) => match cont {
 
                    EvalContinuation::Stepping => continue,
 
                    EvalContinuation::BranchInconsistent => return NonsyncBlocker::Inconsistent,
 
                    EvalContinuation::ComponentTerminated => return NonsyncBlocker::ComponentExit,
 
                    EvalContinuation::SyncBlockStart => return NonsyncBlocker::SyncBlockStart,
 
                    // Not possible to end sync block if never entered one
 
                    EvalContinuation::SyncBlockEnd => unreachable!(),
 
                    EvalContinuation::NewComponent(definition_id, monomorph_idx, args) => {
 
                        // Look up definition
 
                        let mut moved_ports = HashSet::new();
 
                        for arg in args.values.iter() {
 
                            match arg {
 
                                Value::Output(port) => {
 
                                    moved_ports.insert(*port);
 
                                }
 
                                Value::Input(port) => {
 
                                    moved_ports.insert(*port);
 
                                }
 
                                _ => {}
 
                            }
 
                        }
 
                        for region in args.regions.iter() {
 
                            for arg in region {
 
                                match arg {
 
                                    Value::Output(port) => { moved_ports.insert(*port); },
 
                                    Value::Input(port) => { moved_ports.insert(*port); },
 
                                    _ => {},
 
                                }
 
                            }
 
                        }
 
                        let init_state = ComponentState { prompt: Prompt::new(&pd.types, &pd.heap, definition_id, monomorph_idx, args) };
 
                        context.new_component(moved_ports, init_state);
 
                        // Continue stepping
 
                        continue;
 
                    },
 
                    EvalContinuation::NewChannel => {
 
                        // Because of the way we emulate the old context for now, we can safely
 
                        // assume that this will never happen. The old context emulation always
 
                        // creates a channel itself; it never bubbles a "need to create a channel" message
 
                        // to the runtime
 
                        unreachable!();
 
                    },
 
                    EvalContinuation::NewFork => unreachable!(),
 
                    // Outside synchronous blocks, no fires/get/put happens
 
                    EvalContinuation::BlockFires(_) => unreachable!(),
 
                    EvalContinuation::BlockGet(_) => unreachable!(),
 
                    EvalContinuation::Put(_, _) => unreachable!(),
 
                },
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn sync_run<'a: 'b, 'b>(
 
        &'a mut self,
 
        context: &'b mut SyncProtoContext<'b>,
 
        pd: &'a ProtocolDescription,
 
    ) -> SyncBlocker {
 
        let mut context = EvalContext::Sync(context);
 
        loop {
 
            let result = self.prompt.step(&pd.types, &pd.heap, &pd.modules, &mut context);
 
            match result {
 
                Err(err) => {
 
                    println!("Evaluation error:\n{}", err);
 
                    panic!("proper error handling when component fails");
 
                },
 
                Ok(cont) => match cont {
 
                    EvalContinuation::Stepping => continue,
 
                    EvalContinuation::BranchInconsistent => return SyncBlocker::Inconsistent,
 
                    // First need to exit synchronous block before definition may end
 
                    EvalContinuation::ComponentTerminated => unreachable!(),
 
                    // No nested synchronous blocks
 
                    EvalContinuation::SyncBlockStart => unreachable!(),
 
                    EvalContinuation::SyncBlockEnd => return SyncBlocker::SyncBlockEnd,
 
                    // Not possible to create component in sync block
 
                    EvalContinuation::NewComponent(_, _, _) => unreachable!(),
 
                    EvalContinuation::NewChannel => unreachable!(),
 
                    EvalContinuation::NewFork => unreachable!(),
 
                    EvalContinuation::BlockFires(port) => {
 
                        return SyncBlocker::CouldntCheckFiring(port);
 
                    },
 
                    EvalContinuation::BlockGet(port) => {
 
                        return SyncBlocker::CouldntReadMsg(port);
 
                    },
 
                    EvalContinuation::Put(port, message) => {
 
                        let payload;
 

	
 
                        // Extract bytes from `put`
 
                        match &message.values[0] {
 
                            Value::Null => {
 
                                return SyncBlocker::Inconsistent;
 
                            },
 
                            Value::Message(heap_pos) => {
 
                                // Create a copy of the payload
 
                                let values = &message.regions[*heap_pos as usize];
 
                                let mut bytes = Vec::with_capacity(values.len());
 
                                for value in values {
 
                                    bytes.push(value.as_uint8());
 
                                }
 
                                payload = Payload(Arc::new(bytes));
 
                            }
 
                            _ => unreachable!(),
 
                        }
 
                        return SyncBlocker::PutMsg(port, payload);
 
                    }
 
                },
 
            }
 
        }
 
    }
 
}
 

	
 
impl RunContext for EvalContext<'_> {
 
    fn performed_put(&mut self, port: PortId) -> bool {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(ctx) => {
 
                ctx.did_put_or_get(port)
 
            }
 
        }
 
    }
 

	
 
    fn performed_get(&mut self, port: PortId) -> Option<ValueGroup> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(ctx) => {
 
                let payload = ctx.read_msg(port);
 
                if payload.is_none() {
 
                    return None;
 
                }
 

	
 
                let payload = payload.unwrap();
 
                let mut transformed = Vec::with_capacity(payload.len());
 
                for byte in payload.0.iter() {
 
                    transformed.push(Value::UInt8(*byte));
 
                }
 

	
 
                let value_group = ValueGroup{
 
                    values: vec![Value::Message(0)],
 
                    regions: vec![transformed],
 
                };
 

	
 
                return Some(value_group);
 
            }
 
        }
 
    }
 

	
 
    fn fires(&mut self, port: PortId) -> Option<Value> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(context) => {
 
                match context.is_firing(port) {
 
                    Some(did_fire) => Some(Value::Bool(did_fire)),
 
                    None => None,
 
                }
 
            }
 
        }
 
    }
 

	
 
    fn created_channel(&mut self) -> Option<(Value, Value)> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(context) => {
 
                let [from, to] = context.new_port_pair();
 
                let from = Value::Output(from);
 
                let to = Value::Input(to);
 
                return Some((from, to));
 
            },
 
            EvalContext::Sync(_) => unreachable!(),
 
        }
 
    }
 

	
 
    fn performed_fork(&mut self) -> Option<bool> {
 
        // Never actually used in the old runtime
 
        return None;
 
    }
 
}
 

	
 
// TODO: @remove once old runtime has disappeared
 
impl EvalContext<'_> {
 
    fn new_component(&mut self, moved_ports: HashSet<PortId>, init_state: ComponentState) -> () {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(context) => {
 
                context.new_component(moved_ports, init_state)
 
            }
 
            EvalContext::Sync(_) => unreachable!(),
 
        }
 
    }
 
    fn new_channel(&mut self) -> [Value; 2] {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(context) => {
 
                let [from, to] = context.new_port_pair();
 
                let from = Value::Output(from);
 
                let to = Value::Input(to);
 
                return [from, to];
 
            }
 
            EvalContext::Sync(_) => unreachable!(),
 
        }
 
    }
 
    fn fires(&mut self, port: Value) -> Option<Value> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(context) => match port {
 
                Value::Output(port) => context.is_firing(port).map(Value::Bool),
 
                Value::Input(port) => context.is_firing(port).map(Value::Bool),
 
                _ => unreachable!(),
 
            },
 
        }
 
    }
 
    fn get(&mut self, port: Value, store: &mut Store) -> Option<Value> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(context) => match port {
 
                Value::Input(port) => {
 
                    let payload = context.read_msg(port);
 
                    if payload.is_none() { return None; }
 

	
 
                    let heap_pos = store.alloc_heap();
 
                    let heap_pos_usize = heap_pos as usize;
 
                    let payload = payload.unwrap();
 
                    store.heap_regions[heap_pos_usize].values.reserve(payload.0.len());
 
                    for value in payload.0.iter() {
 
                        store.heap_regions[heap_pos_usize].values.push(Value::UInt8(*value));
 
                    }
 

	
 
                    return Some(Value::Message(heap_pos));
 
                }
 
                _ => unreachable!(),
 
            },
 
        }
 
    }
 
    fn did_put(&mut self, port: Value) -> bool {
 
        match self {
 
            EvalContext::None => unreachable!("did_put in None context"),
 
            EvalContext::Nonsync(_) => unreachable!("did_put in nonsync context"),
 
            EvalContext::Sync(context) => match port {
 
                Value::Output(port) => {
 
                    context.did_put_or_get(port)
 
                },
 
                _ => unreachable!("did_put on non-output port value")
 
            }
 
        }
 
    }
 
}
src/runtime/branch.rs
Show inline comments
 
file renamed from src/runtime2/branch.rs to src/runtime/branch.rs
src/runtime/connector.rs
Show inline comments
 
file renamed from src/runtime2/connector.rs to src/runtime/connector.rs
 
// connector.rs
 
//
 
// Represents a component. A component (and the scheduler that is running it)
 
// has many properties that are not easy to subdivide into aspects that are
 
// conceptually handled by particular data structures. That is to say: the code
 
// that we run governs running PDL code, keeping track of ports, instantiating
 
// new components and transports (i.e. interacting with the runtime), running
 
// a consensus algorithm, etc. But on the other hand, our data is rather
 
// simple: we have a speculative execution tree, a set of ports that we own,
 
// and a bit of code that we should run.
 
//
 
// So currently the code is organized as follows:
 
// - The scheduler that is running the component is the authoritative source on
 
//     ports during *non-sync* mode. The consensus algorithm is the
 
//     authoritative source during *sync* mode. They retrieve each other's
 
//     state during the transitions. Hence port data exists duplicated between
 
//     these two data structures.
 
// - The execution tree is where executed branches reside. But the execution
 
//     tree is only aware of the tree shape itself (and keeps track of some
 
//     queues of branches that are in a particular state), and tends to store
 
//     the PDL program state. The consensus algorithm is also somewhat aware
 
//     of the execution tree, but only in terms of what is needed to complete
 
//     a sync round (for now, that means the port mapping in each branch).
 
//     Hence once more we have properties conceptually associated with branches
 
//     in two places.
 
// - TODO: Write about handling messages, consensus wrapping data
 
// - TODO: Write about way information is exchanged between PDL/component and scheduler through ctx
 

	
 
use std::sync::atomic::AtomicBool;
 

	
 
use crate::{PortId, ProtocolDescription};
 
use crate::protocol::eval::{EvalContinuation, EvalError, Prompt, Value, ValueGroup};
 
use crate::ProtocolDescription;
 
use crate::protocol::eval::{EvalContinuation, EvalError, Prompt, Value, PortId, ValueGroup};
 
use crate::protocol::RunContext;
 

	
 
use super::branch::{BranchId, ExecTree, QueueKind, SpeculativeState, PreparedStatement};
 
use super::consensus::{Consensus, Consistency, RoundConclusion, find_ports_in_value_group};
 
use super::inbox::{DataMessage, Message, SyncCompMessage, SyncPortMessage, SyncControlMessage, PublicInbox};
 
use super::native::Connector;
 
use super::port::{PortKind, PortIdLocal};
 
use super::scheduler::{ComponentCtx, SchedulerCtx, MessageTicket};
 

	
 
pub(crate) struct ConnectorPublic {
 
    pub inbox: PublicInbox,
 
    pub sleeping: AtomicBool,
 
}
 

	
 
impl ConnectorPublic {
 
    pub fn new(initialize_as_sleeping: bool) -> Self {
 
        ConnectorPublic{
 
            inbox: PublicInbox::new(),
 
            sleeping: AtomicBool::new(initialize_as_sleeping),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
 
enum Mode {
 
    NonSync,    // running non-sync code
 
    Sync,       // running sync code (in potentially multiple branches)
 
    SyncError,  // encountered an unrecoverable error in sync mode
 
    Error,      // encountered an error in non-sync mode (or finished handling the sync mode error).
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum ConnectorScheduling {
 
    Immediate,          // Run again, immediately
 
    Later,              // Schedule for running, at some later point in time
 
    NotNow,             // Do not reschedule for running
 
    Exit,               // Connector has exited
 
}
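// Hedged sketch of how a scheduler loop might interpret these values; the
// actual scheduler is not part of this file.
fn sketch_apply_scheduling(decision: ConnectorScheduling) {
    match decision {
        ConnectorScheduling::Immediate => { /* run the same component again right away */ },
        ConnectorScheduling::Later => { /* push the component back onto the work queue */ },
        ConnectorScheduling::NotNow => { /* let it sleep until a message wakes it up */ },
        ConnectorScheduling::Exit => { /* remove the component from the runtime */ },
    }
}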
 

	
 
pub(crate) struct ConnectorPDL {
 
    mode: Mode,
 
    eval_error: Option<EvalError>,
 
    tree: ExecTree,
 
    consensus: Consensus,
 
    last_finished_handled: Option<BranchId>,
 
}
 

	
 
struct ConnectorRunContext<'a> {
 
    branch_id: BranchId,
 
    consensus: &'a Consensus,
 
    prepared: PreparedStatement,
 
}
 

	
 
impl<'a> RunContext for ConnectorRunContext<'a>{
 
    fn performed_put(&mut self, _port: PortId) -> bool {
 
        return match self.prepared.take() {
 
            PreparedStatement::None => false,
 
            PreparedStatement::PerformedPut => true,
 
            taken => unreachable!("prepared statement is '{:?}' during 'performed_put()'", taken)
 
        };
 
    }
 

	
 
    fn performed_get(&mut self, _port: PortId) -> Option<ValueGroup> {
 
        return match self.prepared.take() {
 
            PreparedStatement::None => None,
 
            PreparedStatement::PerformedGet(value) => Some(value),
 
            taken => unreachable!("prepared statement is '{:?}' during 'performed_get()'", taken),
 
        };
 
    }
 

	
 
    fn fires(&mut self, port: PortId) -> Option<Value> {
 
        todo!("Remove fires() now");
 
        let port_id = PortIdLocal::new(port.0.u32_suffix);
 
        let port_id = PortIdLocal::new(port.id);
 
        let annotation = self.consensus.get_annotation(self.branch_id, port_id);
 
        return annotation.expected_firing.map(|v| Value::Bool(v));
 
    }
 

	
 
    fn created_channel(&mut self) -> Option<(Value, Value)> {
 
        return match self.prepared.take() {
 
            PreparedStatement::None => None,
 
            PreparedStatement::CreatedChannel(ports) => Some(ports),
 
            taken => unreachable!("prepared statement is '{:?}' during 'created_channel()'", taken),
 
        };
 
    }
 

	
 
    fn performed_fork(&mut self) -> Option<bool> {
 
        return match self.prepared.take() {
 
            PreparedStatement::None => None,
 
            PreparedStatement::ForkedExecution(path) => Some(path),
 
            taken => unreachable!("prepared statement is '{:?}' during 'performed_fork()'", taken),
 
        };
 
    }
 
}
 

	
 
impl Connector for ConnectorPDL {
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        if let Some(scheduling) = self.handle_new_messages(comp_ctx) {
 
            return scheduling;
 
        }
 

	
 
        match self.mode {
 
            Mode::Sync => {
 
                // Run in sync mode
 
                let scheduling = self.run_in_sync_mode(sched_ctx, comp_ctx);
 

	
 
                // Handle any new finished branches
 
                let mut iter_id = self.last_finished_handled.or(self.tree.get_queue_first(QueueKind::FinishedSync));
 
                while let Some(branch_id) = iter_id {
 
                    iter_id = self.tree.get_queue_next(branch_id);
 
                    self.last_finished_handled = Some(branch_id);
 

	
 
                    if let Some(round_conclusion) = self.consensus.handle_new_finished_sync_branch(branch_id, comp_ctx) {
 
                        // Actually found a solution
 
                        return self.enter_non_sync_mode(round_conclusion, comp_ctx);
 
                    }
 

	
 
                    self.last_finished_handled = Some(branch_id);
 
                }
 

	
 
                return scheduling;
 
            },
 
            Mode::NonSync => {
 
                let scheduling = self.run_in_deterministic_mode(sched_ctx, comp_ctx);
 
                return scheduling;
 
            },
 
            Mode::SyncError => {
 
                let scheduling = self.run_in_sync_mode(sched_ctx, comp_ctx);
 
                return scheduling;
 
            },
 
            Mode::Error => {
 
                // This shouldn't really be called, because when we reach exit
 
                // mode the scheduler should not run the component anymore
 
                unreachable!("called component run() during error-mode");
 
            },
 
        }
 
    }
 
}
 

	
 
impl ConnectorPDL {
 
    pub fn new(initial: Prompt) -> Self {
 
        Self{
 
            mode: Mode::NonSync,
 
            eval_error: None,
 
            tree: ExecTree::new(initial),
 
            consensus: Consensus::new(),
 
            last_finished_handled: None,
 
        }
 
    }
 

	
 
    // --- Handling messages
 

	
 
    pub fn handle_new_messages(&mut self, ctx: &mut ComponentCtx) -> Option<ConnectorScheduling> {
 
        while let Some(ticket) = ctx.get_next_message_ticket() {
 
            let message = ctx.read_message_using_ticket(ticket);
 
            let immediate_result = if let Message::Data(_) = message {
 
                self.handle_new_data_message(ticket, ctx);
 
                None
 
            } else {
 
                match ctx.take_message_using_ticket(ticket) {
 
                    Message::Data(_) => unreachable!(),
 
                    Message::SyncComp(message) => {
 
                        self.handle_new_sync_comp_message(message, ctx)
 
                    },
 
                    Message::SyncPort(message) => {
 
                        self.handle_new_sync_port_message(message, ctx);
 
                        None
 
                    },
 
                    Message::SyncControl(message) => {
 
                        self.handle_new_sync_control_message(message, ctx)
 
                    },
 
                    Message::Control(_) => unreachable!("control message in component"),
 
                }
 
            };
 

	
 
            if let Some(result) = immediate_result {
 
                return Some(result);
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    pub fn handle_new_data_message(&mut self, ticket: MessageTicket, ctx: &mut ComponentCtx) {
 
        // Go through all branches that are awaiting new messages and see if
 
        // there is one that can receive this message.
 
        if !self.consensus.handle_new_data_message(ticket, ctx) {
 
            // Message should not be handled now
 
            return;
 
        }
 

	
 
        let message = ctx.read_message_using_ticket(ticket).as_data();
 
        let mut iter_id = self.tree.get_queue_first(QueueKind::AwaitingMessage);
 
        while let Some(branch_id) = iter_id {
 
            iter_id = self.tree.get_queue_next(branch_id);
 

	
 
            let branch = &self.tree[branch_id];
 
            if branch.awaiting_port != message.data_header.target_port { continue; }
 
            if !self.consensus.branch_can_receive(branch_id, &message) { continue; }
 

	
 
            // This branch can receive, so fork and give it the message
 
            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
            let receiving_branch = &mut self.tree[receiving_branch_id];
 

	
 
            debug_assert!(receiving_branch.awaiting_port == message.data_header.target_port);
 
            receiving_branch.awaiting_port = PortIdLocal::new_invalid();
 
            receiving_branch.prepared = PreparedStatement::PerformedGet(message.content.clone());
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message, ctx);
 

	
 
            // And prepare the branch for running
 
            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 
        }
 
    }
 

	
 
    pub fn handle_new_sync_comp_message(&mut self, message: SyncCompMessage, ctx: &mut ComponentCtx) -> Option<ConnectorScheduling> {
 
        println!("DEBUG: Actually really handling {:?}", message);
 
        if let Some(round_conclusion) = self.consensus.handle_new_sync_comp_message(message, ctx) {
 
            return Some(self.enter_non_sync_mode(round_conclusion, ctx));
 
        }
 

	
 
        return None;
 
    }
 

	
 
    pub fn handle_new_sync_port_message(&mut self, message: SyncPortMessage, ctx: &mut ComponentCtx) {
 
        self.consensus.handle_new_sync_port_message(message, ctx);
 
    }
 

	
 
    pub fn handle_new_sync_control_message(&mut self, message: SyncControlMessage, ctx: &mut ComponentCtx) -> Option<ConnectorScheduling> {
 
        if let Some(round_conclusion) = self.consensus.handle_new_sync_control_message(message, ctx) {
 
            return Some(self.enter_non_sync_mode(round_conclusion, ctx));
 
        }
 

	
 
        return None;
 
    }
 

	
 
    // --- Running code
 

	
 
    pub fn run_in_sync_mode(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        // Check if we have any branch that needs running
 
        debug_assert!(self.tree.is_in_sync() && self.consensus.is_in_sync());
 
        let branch_id = self.tree.pop_from_queue(QueueKind::Runnable);
 
        if branch_id.is_none() {
 
            return ConnectorScheduling::NotNow;
 
        }
 

	
 
        // Retrieve the branch and run it
 
        let branch_id = branch_id.unwrap();
 
        let branch = &mut self.tree[branch_id];
 

	
 
        let mut run_context = ConnectorRunContext{
 
            branch_id,
 
            consensus: &self.consensus,
 
            prepared: branch.prepared.take(),
 
        };
 

	
 
        let run_result = Self::run_prompt(&mut branch.code_state, &sched_ctx.runtime.protocol_description, &mut run_context);
 
        if let Err(eval_error) = run_result {
 
            self.eval_error = Some(eval_error);
 
            self.mode = Mode::SyncError;
 
            if let Some(conclusion) = self.consensus.notify_of_fatal_branch(branch_id, comp_ctx) {
 
                // We can exit immediately
 
                return self.enter_non_sync_mode(conclusion, comp_ctx);
 
            } else {
 
                // Current branch failed. But we may have other things that are
 
                // running.
 
                return ConnectorScheduling::Immediate;
 
            }
 
        }
 
        let run_result = run_result.unwrap();
 

	
 
        // Handle the returned result. Note that this match statement contains
 
        // explicit returns in case the run result requires that the component's
 
        // code is run again immediately
 
        match run_result {
 
            EvalContinuation::BranchInconsistent => {
 
                // Branch became inconsistent
 
                branch.sync_state = SpeculativeState::Inconsistent;
 
            },
 
            EvalContinuation::BlockFires(port_id) => {
 
                // Branch called `fires()` on a port that has not been used yet.
 
                let port_id = PortIdLocal::new(port_id.0.u32_suffix);
 
                let port_id = PortIdLocal::new(port_id.id);
 

	
 
                // Create two forks, one that assumes the port will fire, and
 
                // one that assumes the port remains silent
 
                branch.sync_state = SpeculativeState::HaltedAtBranchPoint;
 

	
 
                let firing_branch_id = self.tree.fork_branch(branch_id);
 
                let silent_branch_id = self.tree.fork_branch(branch_id);
 
                self.consensus.notify_of_new_branch(branch_id, firing_branch_id);
 
                let _result = self.consensus.notify_of_speculative_mapping(firing_branch_id, port_id, true, comp_ctx);
 
                debug_assert_eq!(_result, Consistency::Valid);
 
                self.consensus.notify_of_new_branch(branch_id, silent_branch_id);
 
                let _result = self.consensus.notify_of_speculative_mapping(silent_branch_id, port_id, false, comp_ctx);
 
                debug_assert_eq!(_result, Consistency::Valid);
 

	
 
                // Somewhat important: we push the firing one first, such that
 
                // that branch is run again immediately.
 
                self.tree.push_into_queue(QueueKind::Runnable, firing_branch_id);
 
                self.tree.push_into_queue(QueueKind::Runnable, silent_branch_id);
 

	
 
                return ConnectorScheduling::Immediate;
 
            },
 
            EvalContinuation::BlockGet(port_id) => {
 
                // Branch performed a `get()` on a port that does not have a
 
                // received message on that port.
 
                let port_id = PortIdLocal::new(port_id.0.u32_suffix);
 
                let port_id = PortIdLocal::new(port_id.id);
 

	
 
                branch.sync_state = SpeculativeState::HaltedAtBranchPoint;
 
                branch.awaiting_port = port_id;
 
                self.tree.push_into_queue(QueueKind::AwaitingMessage, branch_id);
 

	
 
                // Note: we only know that a branch is waiting on a message when
 
                // it reaches the `get` call. But we might have already received
 
                // a message that targets this branch, so check now.
 
                let mut any_message_received = false;
 
                for message in comp_ctx.get_read_data_messages(port_id) {
 
                    if self.consensus.branch_can_receive(branch_id, &message) {
 
                        // This branch can receive the message, so we do the
 
                        // fork-and-receive dance
 
                        let receiving_branch_id = self.tree.fork_branch(branch_id);
 
                        let branch = &mut self.tree[receiving_branch_id];
 
                        branch.awaiting_port = PortIdLocal::new_invalid();
 
                        branch.prepared = PreparedStatement::PerformedGet(message.content.clone());
 

	
 
                        self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
                        self.consensus.notify_of_received_message(receiving_branch_id, &message, comp_ctx);
 
                        self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 

	
 
                        any_message_received = true;
 
                    }
 
                }
 

	
 
                if any_message_received {
 
                    return ConnectorScheduling::Immediate;
 
                }
 
            }
 
            EvalContinuation::SyncBlockEnd => {
 
                let consistency = self.consensus.notify_of_finished_branch(branch_id);
 
                if consistency == Consistency::Valid {
 
                    branch.sync_state = SpeculativeState::ReachedSyncEnd;
 
                    self.tree.push_into_queue(QueueKind::FinishedSync, branch_id);
 
                } else {
 
                    branch.sync_state = SpeculativeState::Inconsistent;
 
                }
 
            },
 
            EvalContinuation::NewFork => {
 
                // Like the `NewChannel` result. This means we're setting up
 
                // a branch and putting a marker inside the RunContext for the
 
                // next time we run the PDL code
 
                let left_id = branch_id;
 
                let right_id = self.tree.fork_branch(left_id);
 
                self.consensus.notify_of_new_branch(left_id, right_id);
 
                self.tree.push_into_queue(QueueKind::Runnable, left_id);
 
                self.tree.push_into_queue(QueueKind::Runnable, right_id);
 

	
 
                let left_branch = &mut self.tree[left_id];
 
                left_branch.prepared = PreparedStatement::ForkedExecution(true);
 
                let right_branch = &mut self.tree[right_id];
 
                right_branch.prepared = PreparedStatement::ForkedExecution(false);
 
            }
 
            EvalContinuation::Put(port_id, content) => {
 
                // Branch is attempting to send data
 
                let port_id = PortIdLocal::new(port_id.0.u32_suffix);
 
                let port_id = PortIdLocal::new(port_id.id);
 
                let (sync_header, data_header) = self.consensus.handle_message_to_send(branch_id, port_id, &content, comp_ctx);
 
                let message = DataMessage{ sync_header, data_header, content };
 
                match comp_ctx.submit_message(Message::Data(message)) {
 
                    Ok(_) => {
 
                        // Message is underway
 
                        branch.prepared = PreparedStatement::PerformedPut;
 
                        self.tree.push_into_queue(QueueKind::Runnable, branch_id);
 
                        return ConnectorScheduling::Immediate;
 
                    },
 
                    Err(_) => {
 
                        // We don't own the port
 
                        let pd = &sched_ctx.runtime.protocol_description;
 
                        let eval_error = branch.code_state.new_error_at_expr(
 
                            &pd.modules, &pd.heap,
 
                            String::from("attempted to 'put' on port that is no longer owned")
 
                        );
 
                        self.eval_error = Some(eval_error);
 
                        self.mode = Mode::SyncError;
 

	
 
                        println!("DEBUGERINO: Notify of fatal branch");
 
                        if let Some(conclusion) = self.consensus.notify_of_fatal_branch(branch_id, comp_ctx) {
 
                            println!("DEBUGERINO: Actually got {:?}", conclusion);
 
                            return self.enter_non_sync_mode(conclusion, comp_ctx);
 
                        }
 
                    }
 
                }
 
            },
 
            _ => unreachable!("unexpected run result {:?} in sync mode", run_result),
 
        }
 

	
 
        // If here then the run result did not require a particular action. We
 
        // return whether we have more active branches to run or not.
 
        if self.tree.queue_is_empty(QueueKind::Runnable) {
 
            return ConnectorScheduling::NotNow;
 
        } else {
 
            return ConnectorScheduling::Later;
 
        }
 
    }
 

	
 
    pub fn run_in_deterministic_mode(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(!self.tree.is_in_sync() && !self.consensus.is_in_sync());
 

	
 
        let branch = self.tree.base_branch_mut();
 
        debug_assert!(branch.sync_state == SpeculativeState::RunningNonSync);
 

	
 
        let mut run_context = ConnectorRunContext{
 
            branch_id: branch.id,
 
            consensus: &self.consensus,
 
            prepared: branch.prepared.take(),
 
        };
 
        let run_result = Self::run_prompt(&mut branch.code_state, &sched_ctx.runtime.protocol_description, &mut run_context);
 
        if let Err(eval_error) = run_result {
 
            comp_ctx.push_error(eval_error);
 
            return ConnectorScheduling::Exit
 
        }
 
        let run_result = run_result.unwrap();
 

	
 
        match run_result {
 
            EvalContinuation::ComponentTerminated => {
 
                branch.sync_state = SpeculativeState::Finished;
 
                return ConnectorScheduling::Exit;
 
            },
 
            EvalContinuation::SyncBlockStart => {
 
                comp_ctx.notify_sync_start();
 
                let sync_branch_id = self.tree.start_sync();
 
                debug_assert!(self.last_finished_handled.is_none());
 
                self.consensus.start_sync(comp_ctx);
 
                self.consensus.notify_of_new_branch(BranchId::new_invalid(), sync_branch_id);
 
                self.tree.push_into_queue(QueueKind::Runnable, sync_branch_id);
 
                self.mode = Mode::Sync;
 

	
 
                return ConnectorScheduling::Immediate;
 
            },
 
            EvalContinuation::NewComponent(definition_id, monomorph_idx, arguments) => {
 
                // Note: we're relinquishing ownership of ports. But because
 
                // we are in non-sync mode the scheduler will handle and check
 
                // port ownership transfer.
 
                debug_assert!(comp_ctx.workspace_ports.is_empty());
 
                find_ports_in_value_group(&arguments, &mut comp_ctx.workspace_ports);
 

	
 
                let new_prompt = Prompt::new(
 
                    &sched_ctx.runtime.protocol_description.types,
 
                    &sched_ctx.runtime.protocol_description.heap,
 
                    definition_id, monomorph_idx, arguments
 
                );
 
                let new_component = ConnectorPDL::new(new_prompt);
 
                comp_ctx.push_component(new_component, comp_ctx.workspace_ports.clone());
 
                comp_ctx.workspace_ports.clear();
 

	
 
                return ConnectorScheduling::Later;
 
            },
 
            EvalContinuation::NewChannel => {
 
                let (getter, putter) = sched_ctx.runtime.create_channel(comp_ctx.id);
 
                debug_assert!(getter.kind == PortKind::Getter && putter.kind == PortKind::Putter);
 
                branch.prepared = PreparedStatement::CreatedChannel((
 
                    Value::Output(PortId::new(putter.self_id.index)),
 
                    Value::Input(PortId::new(getter.self_id.index)),
 
                ));
 

	
 
                comp_ctx.push_port(putter);
 
                comp_ctx.push_port(getter);
 

	
 
                return ConnectorScheduling::Immediate;
 
            },
 
            _ => unreachable!("unexpected run result '{:?}' while running in non-sync mode", run_result),
 
        }
 
    }
 

	
 
    /// Helper that moves the component's state back into non-sync mode, using
 
    /// the provided solution branch ID as the branch that should be committed to
 
    /// memory. If this function returns `ConnectorScheduling::Exit`, the component is expected
 
    /// to exit.
 
    fn enter_non_sync_mode(&mut self, conclusion: RoundConclusion, ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(self.mode == Mode::Sync || self.mode == Mode::SyncError);
 

	
 
        // Depending on local state decide what to do
 
        let final_branch_id = match conclusion {
 
            RoundConclusion::Success(branch_id) => Some(branch_id),
 
            RoundConclusion::Failure => None,
 
        };
 

	
 
        if let Some(solution_branch_id) = final_branch_id {
 
            let mut fake_vec = Vec::new();
 
            self.tree.end_sync(solution_branch_id);
 
            self.consensus.end_sync(solution_branch_id, &mut fake_vec);
 
            debug_assert!(fake_vec.is_empty());
 

	
 
            ctx.notify_sync_end(&[]);
 
            self.last_finished_handled = None;
 
            self.eval_error = None; // in case we came from the SyncError mode
 
            self.mode = Mode::NonSync;
 

	
 
            return ConnectorScheduling::Immediate;
 
        } else {
 
            // No final branch, because we're supposed to exit!
 
            self.last_finished_handled = None;
 
            self.mode = Mode::Error;
 
            if let Some(eval_error) = self.eval_error.take() {
 
                ctx.push_error(eval_error);
 
            }
 

	
 
            return ConnectorScheduling::Exit;
 
        }
 
    }
 

	
 
    /// Runs the prompt repeatedly until it yields an execution-blocking
 
    /// continuation or an evaluation error.
 
    #[inline]
 
    fn run_prompt(prompt: &mut Prompt, pd: &ProtocolDescription, ctx: &mut ConnectorRunContext) -> Result<EvalContinuation, EvalError> {
 
        loop {
 
            let result = prompt.step(&pd.types, &pd.heap, &pd.modules, ctx);
 
            if let Ok(EvalContinuation::Stepping) = result {
 
                continue;
 
            }
 

	
 
            return result;
 
        }
 
    }
 
}
 
\ No newline at end of file
src/runtime/consensus.rs
Show inline comments
 
file renamed from src/runtime2/consensus.rs to src/runtime/consensus.rs
 
use crate::collections::VecSet;
 

	
 
use crate::protocol::eval::ValueGroup;
 

	
 
use super::ConnectorId;
 
use super::branch::BranchId;
 
use super::port::{ChannelId, PortIdLocal, PortState};
 
use super::inbox::{
 
    Message, DataHeader, SyncHeader, ChannelAnnotation, BranchMarker,
 
    DataMessage,
 
    SyncCompMessage, SyncCompContent,
 
    SyncPortMessage, SyncPortContent,
 
    SyncControlMessage, SyncControlContent
 
};
 
use super::scheduler::{ComponentCtx, ComponentPortChange, MessageTicket};
 

	
 
struct BranchAnnotation {
 
    channel_mapping: Vec<ChannelAnnotation>,
 
    cur_marker: BranchMarker,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) struct LocalSolution {
 
    component: ConnectorId,
 
    final_branch_id: BranchId,
 
    sync_round_number: u32,
 
    port_mapping: Vec<(ChannelId, BranchMarker)>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct GlobalSolution {
 
    component_branches: Vec<(ConnectorId, BranchId, u32)>,
 
    channel_mapping: Vec<(ChannelId, BranchMarker)>, // TODO: This can go, is debugging info
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
pub enum RoundConclusion {
 
    Failure,
 
    Success(BranchId),
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Consensus
 
// -----------------------------------------------------------------------------
 

	
 
#[derive(Debug)]
 
struct Peer {
 
    id: ConnectorId,
 
    encountered_this_round: bool,
 
    expected_sync_round: u32,
 
}
 

	
 
/// The consensus algorithm. Currently only implemented to find the component
 
/// with the highest ID within the sync region and let it handle all the
 
/// local solutions.
 
///
 
/// The type itself serves as an experiment to see how code should be organized.
 
// TODO: Flatten all datastructures
 
// TODO: Have a "branch+port position hint" in case multiple operations are
 
//  performed on the same port to prevent repeated lookups
 
// TODO: A lot of stuff should be batched. Like checking all the sync headers
 
//  and sending "I have a higher ID" messages. Should reduce locking by quite a
 
//  bit.
 
// TODO: Needs a refactor. Firstly we have cases where we don't have a branch ID
 
//  but we do want to enumerate all current ports. So put that somewhere in a
 
//  central place. Secondly. Error handling and regular message handling is
 
//  becoming a mess.
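// Editor's sketch (illustrative only, not part of this module): the leader
// election reduces to "adopt the highest component ID seen in any sync
// header", so the region converges on a single leader. With plain integers
// standing in for `ConnectorId`:
//
//     fn adopt_leader(current_leader: u32, seen_in_header: u32) -> u32 {
//         current_leader.max(seen_in_header) // ties keep the current leader
//     }
//
// The real rule operates on `ConnectorId` values in
// `handle_received_sync_header`, which additionally notifies peers of a newly
// discovered leader.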
 
pub(crate) struct Consensus {
 
    // --- State that is cleared after each round
 
    // Local component's state
 
    highest_connector_id: ConnectorId,
 
    branch_annotations: Vec<BranchAnnotation>, // index is branch ID
 
    branch_markers: Vec<BranchId>, // index is branch marker, maps to branch
 
    // Gathered state from communication
 
    encountered_ports: VecSet<PortIdLocal>, // to determine if we should send "port remains silent" messages.
 
    solution_combiner: SolutionCombiner,
 
    handled_wave: bool, // encountered notification wave in this round
 
    conclusion: Option<RoundConclusion>,
 
    ack_remaining: u32,
 
    // --- Persistent state
 
    peers: Vec<Peer>,
 
    sync_round: u32,
 
    // --- Workspaces
 
    workspace_ports: Vec<PortIdLocal>,
 
}
 

	
 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 
pub(crate) enum Consistency {
 
    Valid,
 
    Inconsistent,
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
pub(crate) enum MessageOrigin {
 
    Past,
 
    Present,
 
    Future
 
}
 

	
 
impl Consensus {
 
    pub fn new() -> Self {
 
        return Self {
 
            highest_connector_id: ConnectorId::new_invalid(),
 
            branch_annotations: Vec::new(),
 
            branch_markers: Vec::new(),
 
            encountered_ports: VecSet::new(),
 
            solution_combiner: SolutionCombiner::new(),
 
            handled_wave: false,
 
            conclusion: None,
 
            ack_remaining: 0,
 
            peers: Vec::new(),
 
            sync_round: 0,
 
            workspace_ports: Vec::new(),
 
        }
 
    }
 

	
 
    // --- Controlling sync round and branches
 

	
 
    /// Returns whether the consensus algorithm is running in sync mode
 
    pub fn is_in_sync(&self) -> bool {
 
        return !self.branch_annotations.is_empty();
 
    }
 

	
 
    #[deprecated]
 
    pub fn get_annotation(&self, branch_id: BranchId, channel_id: PortIdLocal) -> &ChannelAnnotation {
 
        let branch = &self.branch_annotations[branch_id.index as usize];
 
        let port = branch.channel_mapping.iter().find(|v| v.channel_id.index == channel_id.index).unwrap();
 
        return port;
 
    }
 

	
 
    /// Sets up the consensus algorithm for a new synchronous round. The
 
    /// provided ports should be the ports the component owns at the start of
 
    /// the sync round.
 
    pub fn start_sync(&mut self, ctx: &ComponentCtx) {
 
        debug_assert!(!self.highest_connector_id.is_valid());
 
        debug_assert!(self.branch_annotations.is_empty());
 
        debug_assert!(self.solution_combiner.local.is_empty());
 

	
 
        // We'll use the first "branch" (the non-sync one) to store our ports,
 
        // so its mapping can be cloned whenever a new branch is forked.
 
        self.branch_annotations.push(BranchAnnotation{
 
            channel_mapping: ctx.get_ports().iter()
 
                .map(|v| ChannelAnnotation {
 
                    channel_id: v.channel_id,
 
                    registered_id: None,
 
                    expected_firing: None,
 
                })
 
                .collect(),
 
            cur_marker: BranchMarker::new_invalid(),
 
        });
 
        self.branch_markers.push(BranchId::new_invalid());
 

	
 
        self.highest_connector_id = ctx.id;
 

	
 
    }
 

	
 
    /// Notifies the consensus algorithm that a new branch has appeared. Must be
 
    /// called for each forked branch in the execution tree.
 
    pub fn notify_of_new_branch(&mut self, parent_branch_id: BranchId, new_branch_id: BranchId) {
 
        // If called correctly, then each time we are notified the new branch's
 
        // index equals the current length of `branch_annotations`.
 
        debug_assert!(self.branch_annotations.len() == new_branch_id.index as usize);
 
        let parent_branch_annotations = &self.branch_annotations[parent_branch_id.index as usize];
 
        let new_marker = BranchMarker::new(self.branch_markers.len() as u32);
 
        let new_branch_annotations = BranchAnnotation{
 
            channel_mapping: parent_branch_annotations.channel_mapping.clone(),
 
            cur_marker: new_marker,
 
        };
 
        self.branch_annotations.push(new_branch_annotations);
 
        self.branch_markers.push(new_branch_id);
 
    }
 

	
 
    /// Notifies the consensus algorithm that a particular branch has
 
    /// encountered an unrecoverable error.
 
    pub fn notify_of_fatal_branch(&mut self, failed_branch_id: BranchId, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        debug_assert!(self.is_in_sync());
 

	
 
        // Check for trivial case, where branch has not yet communicated within
 
        // the consensus algorithm
 
        let branch = &self.branch_annotations[failed_branch_id.index as usize];
 
        if branch.channel_mapping.iter().all(|v| v.registered_id.is_none()) {
 
            println!("DEBUG: Failure everything silent");
 
            return Some(RoundConclusion::Failure);
 
        }
 

	
 
        // We're not in the trivial case: since we've communicated we need to
 
        // let everyone know that this round is probably not going to end well.
 
        return self.initiate_sync_failure(ctx);
 
    }
 

	
 
    /// Notifies the consensus algorithm that a branch has reached the end of
 
    /// the sync block. A final consistency check is performed, whose result the
 
    /// caller has to handle.
 
    pub fn notify_of_finished_branch(&self, branch_id: BranchId) -> Consistency {
 
        debug_assert!(self.is_in_sync());
 
        let branch = &self.branch_annotations[branch_id.index as usize];
 
        for mapping in &branch.channel_mapping {
 
            match mapping.expected_firing {
 
                Some(expected) => {
 
                    if expected != mapping.registered_id.is_some() {
 
                        // Inconsistent speculative state and actual state
 
                        debug_assert!(mapping.registered_id.is_none()); // because if we did fire on a silent port, we should've caught that earlier
 
                        return Consistency::Inconsistent;
 
                    }
 
                },
 
                None => {},
 
            }
 
        }
 

	
 
        return Consistency::Valid;
 
    }
 

	
 
    /// Notifies the consensus algorithm that a particular branch has assumed
 
    /// a speculative value for its port mapping.
 
    pub fn notify_of_speculative_mapping(&mut self, branch_id: BranchId, port_id: PortIdLocal, does_fire: bool, ctx: &ComponentCtx) -> Consistency {
 
        debug_assert!(self.is_in_sync());
 

	
 
        let port_desc = ctx.get_port_by_id(port_id).unwrap();
 
        let channel_id = port_desc.channel_id;
 
        let branch = &mut self.branch_annotations[branch_id.index as usize];
 
        for mapping in &mut branch.channel_mapping {
 
            if mapping.channel_id == channel_id {
 
                match mapping.expected_firing {
 
                    None => {
 
                        // Not yet mapped, perform speculative mapping
 
                        mapping.expected_firing = Some(does_fire);
 
                        return Consistency::Valid;
 
                    },
 
                    Some(current) => {
 
                        // Already mapped
 
                        if current == does_fire {
 
                            return Consistency::Valid;
 
                        } else {
 
                            return Consistency::Inconsistent;
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        unreachable!("notify_of_speculative_mapping called with unowned port");
 
    }
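    // Example of the rule above (hypothetical values): a branch that already
    // sent on a port has `expected_firing == Some(true)` for that channel, so
    // a later speculative assumption that the same port stays silent yields
    // `Consistency::Inconsistent`, which the caller has to handle.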
 

	
 
    /// Generates a new local solution from a finished branch. If the component
 
    /// is not the leader of the sync region then it will be sent to the
 
    /// appropriate component. If it is the leader then there is a chance that
 
    /// this solution completes a global solution. In that case the solution
 
    /// branch ID will be returned.
 
    pub(crate) fn handle_new_finished_sync_branch(&mut self, branch_id: BranchId, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        // Turn the port mapping into a local solution
 
        let source_mapping = &self.branch_annotations[branch_id.index as usize].channel_mapping;
 
        let mut target_mapping = Vec::with_capacity(source_mapping.len());
 

	
 
        for port in source_mapping {
 
            // Note: if the port is silent, and we've never communicated
 
            // over the port, then we need to do so now, to let the peer
 
            // component know about our sync leader state.
 
            let port_desc = ctx.get_port_by_channel_id(port.channel_id).unwrap();
 
            let self_port_id = port_desc.self_id;
 
            let peer_port_id = port_desc.peer_id;
 
            let channel_id = port_desc.channel_id;
 

	
 
            if !self.encountered_ports.contains(&self_port_id) {
 
                let message = SyncPortMessage {
 
                    sync_header: SyncHeader{
 
                        sending_component_id: ctx.id,
 
                        highest_component_id: self.highest_connector_id,
 
                        sync_round: self.sync_round
 
                    },
 
                    source_port: self_port_id,
 
                    target_port: peer_port_id,
 
                    content: SyncPortContent::SilentPortNotification,
 
                };
 
                match ctx.submit_message(Message::SyncPort(message)) {
 
                    Ok(_) => {
 
                        self.encountered_ports.push(self_port_id);
 
                    },
 
                    Err(_) => {
 
                        // Seems like we were done with this branch, but one of
 
                        // the silent ports (in scope) is actually closed
 
                        return self.notify_of_fatal_branch(branch_id, ctx);
 
                    }
 
                }
 
            }
 

	
 
            target_mapping.push((
 
                channel_id,
 
                port.registered_id.unwrap_or(BranchMarker::new_invalid())
 
            ));
 
        }
 

	
 
        let local_solution = LocalSolution{
 
            component: ctx.id,
 
            sync_round_number: self.sync_round,
 
            final_branch_id: branch_id,
 
            port_mapping: target_mapping,
 
        };
 
        let maybe_conclusion = self.send_to_leader_or_handle_as_leader(SyncCompContent::LocalSolution(local_solution), ctx);
 
        return maybe_conclusion;
 
    }
 

	
 
    /// Notifies the consensus algorithm about the chosen branch to commit to
 
    /// memory (may be the invalid "start" branch)
 
    pub fn end_sync(&mut self, branch_id: BranchId, final_ports: &mut Vec<ComponentPortChange>) {
 
        debug_assert!(self.is_in_sync());
 

	
 
        // TODO: Handle sending and receiving ports
 
        // Set final ports
 
        let branch = &self.branch_annotations[branch_id.index as usize];
 

	
 
        // Clear out internal storage to defaults
 
        println!("DEBUG: ***** Incrementing sync round stuff");
 
        self.highest_connector_id = ConnectorId::new_invalid();
 
        self.branch_annotations.clear();
 
        self.branch_markers.clear();
 
        self.encountered_ports.clear();
 
        self.solution_combiner.clear();
 
        self.handled_wave = false;
 
        self.conclusion = None;
 
        self.ack_remaining = 0;
 

	
 
        // And modify persistent storage
 
        self.sync_round += 1;
 

	
 
        for peer in self.peers.iter_mut() {
 
            peer.encountered_this_round = false;
 
            peer.expected_sync_round += 1;
 
        }
 

	
 
        println!("DEBUG: ***** Peers post round are:\n{:#?}", &self.peers)
 
    }
 

	
 
    // --- Handling messages
 

	
 
    /// Prepares a message for sending. Caller should have made sure that
 
    /// sending the message is consistent with the speculative state.
 
    pub fn handle_message_to_send(&mut self, branch_id: BranchId, source_port_id: PortIdLocal, content: &ValueGroup, ctx: &mut ComponentCtx) -> (SyncHeader, DataHeader) {
 
        debug_assert!(self.is_in_sync());
 
        let branch = &mut self.branch_annotations[branch_id.index as usize];
 
        let port_info = ctx.get_port_by_id(source_port_id).unwrap();
 

	
 
        if cfg!(debug_assertions) {
 
            // Check for consistent mapping
 
            let port = branch.channel_mapping.iter()
 
                .find(|v| v.channel_id == port_info.channel_id)
 
                .unwrap();
 
            debug_assert!(port.expected_firing == None || port.expected_firing == Some(true));
 
        }
 

	
 
        // Check for ports that are being sent
 
        debug_assert!(self.workspace_ports.is_empty());
 
        find_ports_in_value_group(content, &mut self.workspace_ports);
 
        if !self.workspace_ports.is_empty() {
 
            todo!("handle sending ports");
 
            self.workspace_ports.clear();
 
        }
 

	
 
        // Construct data header
 
        let data_header = DataHeader{
 
            expected_mapping: branch.channel_mapping.iter()
 
                .filter(|v| v.registered_id.is_some() || v.channel_id == port_info.channel_id)
 
                .copied()
 
                .collect(),
 
            sending_port: port_info.self_id,
 
            target_port: port_info.peer_id,
 
            new_mapping: branch.cur_marker,
 
        };
 

	
 
        // Update port mapping
 
        for mapping in &mut branch.channel_mapping {
 
            if mapping.channel_id == port_info.channel_id {
 
                mapping.expected_firing = Some(true);
 
                mapping.registered_id = Some(branch.cur_marker);
 
            }
 
        }
 

	
 
        // Update branch marker
 
        let new_marker = BranchMarker::new(self.branch_markers.len() as u32);
 
        branch.cur_marker = new_marker;
 
        self.branch_markers.push(branch_id);
 

	
 
        self.encountered_ports.push(source_port_id);
 

	
 
        return (self.create_sync_header(ctx), data_header);
 
    }
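    // Editorial summary of the function above: the outgoing data header carries
    // the branch's current mapping as `expected_mapping` and its current marker
    // as `new_mapping`; the sending branch then advances to a fresh marker so a
    // subsequent send from the same branch remains distinguishable.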
 

	
 
    /// Handles a new data message by handling the sync header. The caller is
 
    /// responsible for checking for branches that might be able to receive
 
    /// the message.
 
    pub fn handle_new_data_message(&mut self, ticket: MessageTicket, ctx: &mut ComponentCtx) -> bool {
 
        let message = ctx.read_message_using_ticket(ticket).as_data();
 
        let target_port = message.data_header.target_port;
 
        match self.handle_received_sync_header(message.sync_header, ctx) {
 
            MessageOrigin::Past => return false,
 
            MessageOrigin::Present => {
 
                self.encountered_ports.push(target_port);
 
                return true;
 
            },
 
            MessageOrigin::Future => {
 
                let message = ctx.take_message_using_ticket(ticket);
 
                ctx.put_back_message(message);
 
                return false;
 
            }
 
        }
 
    }
 

	
 
    /// Handles a new sync message by handling the sync header and the contents
 
    /// of the message. Returns `Some` with the round conclusion if the sync
 
    /// round has come to an end (either a global solution or a failure).
 
    pub fn handle_new_sync_comp_message(&mut self, message: SyncCompMessage, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        match self.handle_received_sync_header(message.sync_header, ctx) {
 
            MessageOrigin::Past => return None,
 
            MessageOrigin::Present => {},
 
            MessageOrigin::Future => {
 
                ctx.put_back_message(Message::SyncComp(message));
 
                return None
 
            }
 
        }
 

	
 
        // And handle the contents
 
        debug_assert_eq!(message.target_component_id, ctx.id);
 

	
 
        match &message.content {
 
            SyncCompContent::LocalFailure |
 
            SyncCompContent::LocalSolution(_) |
 
            SyncCompContent::PartialSolution(_) |
 
            SyncCompContent::AckFailure |
 
            SyncCompContent::Presence(_) => {
 
                // Needs to be handled by the leader
 
                return self.send_to_leader_or_handle_as_leader(message.content, ctx);
 
            },
 
            SyncCompContent::GlobalSolution(solution) => {
 
                // Found a global solution
 
                debug_assert_ne!(self.highest_connector_id, ctx.id); // not the leader
 
                let (_, branch_id, _) = solution.component_branches.iter()
 
                    .find(|(component_id, _, _)| *component_id == ctx.id)
 
                    .unwrap();
 
                return Some(RoundConclusion::Success(*branch_id));
 
            },
 
            SyncCompContent::GlobalFailure => {
 
                // Global failure of round, send Ack to leader
 
                println!("DEBUGERINO: Got GlobalFailure, sending Ack in response");
 
                debug_assert_ne!(self.highest_connector_id, ctx.id); // not the leader
 
                let _result = self.send_to_leader_or_handle_as_leader(SyncCompContent::AckFailure, ctx);
 
                debug_assert!(_result.is_none());
 
                return Some(RoundConclusion::Failure);
 
            },
 
            SyncCompContent::Notification => {
 
                // We were just interested in the sync header we handled above
 
                return None;
 
            }
 
        }
 
    }
 

	
 
    pub fn handle_new_sync_port_message(&mut self, message: SyncPortMessage, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        match self.handle_received_sync_header(message.sync_header, ctx) {
 
            MessageOrigin::Past => return None,
 
            MessageOrigin::Present => {},
 
            MessageOrigin::Future => {
 
                ctx.put_back_message(Message::SyncPort(message));
 
                return None;
 
            }
 
        }
 

	
 
        debug_assert!(self.is_in_sync());
 
        debug_assert!(ctx.get_port_by_id(message.target_port).is_some());
 
        match message.content {
 
            SyncPortContent::SilentPortNotification => {
 
                // The point here is to let us become part of the sync round and
 
                // take note of the leader in case all of our ports are silent.
 
                self.encountered_ports.push(message.target_port);
 
                return None
 
            }
 
            SyncPortContent::NotificationWave => {
 
                // Wave to discover everyone in the network. Handling the sync
 
                // header takes care of leader discovery; here we need to make
 
                // sure we propagate the wave.
 
                if self.handled_wave {
 
                    return None;
 
                }
 

	
 
                self.handled_wave = true;
 

	
 
                // Propagate wave to all peers except the one that has sent us
 
                // the wave.
 
                for mapping in &self.branch_annotations[0].channel_mapping {
 
                    let channel_id = mapping.channel_id;
 
                    let port_desc = ctx.get_port_by_channel_id(channel_id).unwrap();
 
                    if port_desc.self_id == message.target_port {
 
                        // Wave came from this port, no need to send one back
 
                        continue;
 
                    }
 

	
 
                    let message = SyncPortMessage{
 
                        sync_header: self.create_sync_header(ctx),
 
                        source_port: port_desc.self_id,
 
                        target_port: port_desc.peer_id,
 
                        content: SyncPortContent::NotificationWave,
 
                    };
 
                    // As with the other SyncPort where we throw away the
 
                    // result: we're dealing with an error here anyway
 
                    let _unused = ctx.submit_message(Message::SyncPort(message));
 
                }
 

	
 
                // And let the leader know about our port state
 
                let annotations = &self.branch_annotations[0];
 
                let mut channels = Vec::with_capacity(annotations.channel_mapping.len());
 
                for mapping in &annotations.channel_mapping {
 
                    let port_info = ctx.get_port_by_channel_id(mapping.channel_id).unwrap();
 
                    channels.push(LocalChannelPresence{
 
                        channel_id: mapping.channel_id,
 
                        is_closed: port_info.state == PortState::Closed,
 
                    });
 
                }
 

	
 
                let maybe_conclusion = self.send_to_leader_or_handle_as_leader(SyncCompContent::Presence(ComponentPresence{
 
                    component_id: ctx.id,
 
                    channels,
 
                }), ctx);
 
                return maybe_conclusion;
 
            }
 
        }
 
    }
 

	
 
    pub fn handle_new_sync_control_message(&mut self, message: SyncControlMessage, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        if message.in_response_to_sync_round < self.sync_round {
 
            // Old message
 
            return None
 
        }
 

	
 
        // Because the message is always sent in response to a message
 
        // originating here, the sync round number can never be larger than the
 
        // currently stored one.
 
        debug_assert_eq!(message.in_response_to_sync_round, self.sync_round);
 
        match message.content {
 
            SyncControlContent::ChannelIsClosed(_) => {
 
                return self.initiate_sync_failure(ctx);
 
            }
 
        }
 
    }
 

	
 
    pub fn notify_of_received_message(&mut self, branch_id: BranchId, message: &DataMessage, ctx: &ComponentCtx) {
 
        debug_assert!(self.branch_can_receive(branch_id, message));
 

	
 
        let target_port = ctx.get_port_by_id(message.data_header.target_port).unwrap();
 
        let branch = &mut self.branch_annotations[branch_id.index as usize];
 
        for mapping in &mut branch.channel_mapping {
 
            if mapping.channel_id == target_port.channel_id {
 
                // Found the port in which the message should be inserted
 
                mapping.registered_id = Some(message.data_header.new_mapping);
 

	
 
                // Check for sent ports
 
                debug_assert!(self.workspace_ports.is_empty());
 
                find_ports_in_value_group(&message.content, &mut self.workspace_ports);
 
                if !self.workspace_ports.is_empty() {
 
                    todo!("handle received ports");
 
                    self.workspace_ports.clear();
 
                }
 

	
 
                return;
 
            }
 
        }
 

	
 
        // If we get here, then the branch didn't actually own the port, which
 
        // means the caller made a mistake.
 
        unreachable!("incorrect notify_of_received_message");
 
    }
 

	
 
    /// Matches the mapping between the branch and the data message. If they
 
    /// match then the branch can receive the message.
 
    pub fn branch_can_receive(&self, branch_id: BranchId, message: &DataMessage) -> bool {
 
        if let Some(peer) = self.peers.iter().find(|v| v.id == message.sync_header.sending_component_id) {
 
            if message.sync_header.sync_round < peer.expected_sync_round {
 
                return false;
 
            }
 
        }
 

	
 
        let annotation = &self.branch_annotations[branch_id.index as usize];
 
        for expected in &message.data_header.expected_mapping {
 
            // If we own the port, then we have an entry in the
 
            // annotation, check if the current mapping matches
 
            for current in &annotation.channel_mapping {
 
                if expected.channel_id == current.channel_id {
 
                    if expected.registered_id != current.registered_id {
 
                        // IDs do not match, we cannot receive the
 
                        // message in this branch
 
                        return false;
 
                    }
 
                }
 
            }
 
        }
 

	
 
        return true;
 
    }
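    // Illustration (hypothetical values): if this branch's mapping says that
    // channel 3 last carried marker 7, it can only receive a message whose
    // `expected_mapping` also lists (channel 3, marker 7); a disagreement on
    // any shared channel rejects the message for this branch.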
 

	
 
    // --- Internal helpers
 

	
 
    fn handle_received_sync_header(&mut self, sync_header: SyncHeader, ctx: &mut ComponentCtx) -> MessageOrigin {
 
        debug_assert!(sync_header.sending_component_id != ctx.id); // not sending to ourselves
 
        let origin = self.handle_peer(&sync_header);
 
        println!(" ********************** GOT {:?}", origin);
 
        if origin != MessageOrigin::Present {
 
            // We do not have to handle it now
 
            return origin;
 
        }
 

	
 
        if sync_header.highest_component_id > self.highest_connector_id {
 
            // Sender has higher component ID. So should be the target of our
 
            // messages. We should also let all of our peers know
 
            self.highest_connector_id = sync_header.highest_component_id;
 
            for peer in self.peers.iter() {
 
                if peer.id == sync_header.sending_component_id || !peer.encountered_this_round {
 
                    // Don't need to send it to this one
 
                    continue
 
                }
 

	
 
                let message = SyncCompMessage {
 
                    sync_header: self.create_sync_header(ctx),
 
                    target_component_id: peer.id,
 
                    content: SyncCompContent::Notification,
 
                };
 
                ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 
            }
 

	
 
            // But also send our locally combined solution
 
            self.forward_local_data_to_new_leader(ctx);
 
        } else if sync_header.highest_component_id < self.highest_connector_id {
 
            // Sender has lower leader ID, so it should know about our higher
 
            // one.
 
            let message = SyncCompMessage {
 
                sync_header: self.create_sync_header(ctx),
 
                target_component_id: sync_header.sending_component_id,
 
                content: SyncCompContent::Notification
 
            };
 
            ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 
        } // else: exactly equal, so do nothing
 

	
 
        return MessageOrigin::Present;
 
    }
 

	
 
    /// Handles a (potentially new) peer. Classifies the message as originating
 
    /// from the past, the present or the future based on its sync round number.
 
    fn handle_peer(&mut self, sync_header: &SyncHeader) -> MessageOrigin {
 
        let position = self.peers.iter().position(|v| v.id == sync_header.sending_component_id);
 
        match position {
 
            Some(index) => {
 
                let entry = &mut self.peers[index];
 
                if entry.encountered_this_round {
 
                    // Already encountered this round
 
                    if sync_header.sync_round < entry.expected_sync_round {
 
                        return MessageOrigin::Past;
 
                    } else if sync_header.sync_round == entry.expected_sync_round {
 
                        return MessageOrigin::Present;
 
                    } else {
 
                        return MessageOrigin::Future;
 
                    }
 
                } else {
 
                    // TODO: Proper handling of potential overflow
 
                    entry.encountered_this_round = true;
 

	
 
                    if sync_header.sync_round >= entry.expected_sync_round {
 
                        entry.expected_sync_round = sync_header.sync_round;
 
                        return MessageOrigin::Present;
 
                    } else {
 
                        return MessageOrigin::Past;
 
                    }
 
                }
 
            },
 
            None => {
 
                self.peers.push(Peer{
 
                    id: sync_header.sending_component_id,
 
                    encountered_this_round: true,
 
                    expected_sync_round: sync_header.sync_round,
 
                });
 
                return MessageOrigin::Present;
 
            }
 
        }
 
    }
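    // A minimal sketch of the Past/Present/Future classification above, using
    // plain integers instead of the stored counters. The made-up `classify`
    // helper only covers a peer that was already encountered this round:
    //
    //     fn classify(expected: u32, received: u32) -> MessageOrigin {
    //         match received.cmp(&expected) {
    //             std::cmp::Ordering::Less => MessageOrigin::Past,
    //             std::cmp::Ordering::Equal => MessageOrigin::Present,
    //             std::cmp::Ordering::Greater => MessageOrigin::Future,
    //         }
    //     }
    //
    // The real code also updates `expected_sync_round` for peers that were not
    // yet seen this round.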
 

	
 
    /// Sends a message towards the leader. If we are already the leader, the
 
    /// message will be handled immediately.
 
    fn send_to_leader_or_handle_as_leader(&mut self, content: SyncCompContent, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        if self.highest_connector_id == ctx.id {
 
            // We are the leader
 
            match content {
 
                SyncCompContent::LocalFailure => {
 
                    if self.solution_combiner.mark_failure_and_check_for_global_failure() {
 
                        return self.handle_global_failure_as_leader(ctx);
 
                    }
 
                },
 
                SyncCompContent::LocalSolution(local_solution) => {
 
                    if let Some(global_solution) = self.solution_combiner.add_solution_and_check_for_global_solution(local_solution) {
 
                        return self.handle_global_solution_as_leader(global_solution, ctx);
 
                    }
 
                },
 
                SyncCompContent::PartialSolution(partial_solution) => {
 
                    if let Some(conclusion) = self.solution_combiner.combine(partial_solution) {
 
                        match conclusion {
 
                            LeaderConclusion::Solution(global_solution) => {
 
                                return self.handle_global_solution_as_leader(global_solution, ctx);
 
                            },
 
                            LeaderConclusion::Failure => {
 
                                return self.handle_global_failure_as_leader(ctx);
 
                            }
 
                        }
 
                    }
 
                },
 
                SyncCompContent::Presence(component_presence) => {
 
                    if self.solution_combiner.add_presence_and_check_for_global_failure(component_presence.component_id, &component_presence.channels) {
 
                        return self.handle_global_failure_as_leader(ctx);
 
                    }
 
                },
 
                SyncCompContent::AckFailure => {
 
                    debug_assert_eq!(Some(RoundConclusion::Failure), self.conclusion);
 
                    debug_assert!(self.ack_remaining > 0);
 
                    self.ack_remaining -= 1;
 
                    if self.ack_remaining == 0 {
 
                        return Some(RoundConclusion::Failure);
 
                    }
 
                }
 
                SyncCompContent::Notification | SyncCompContent::GlobalSolution(_) |
 
                SyncCompContent::GlobalFailure => {
 
                    unreachable!("unexpected message content for leader");
 
                },
 
            }
 
        } else {
 
            // Someone else is the leader
 
            let message = SyncCompMessage {
 
                sync_header: self.create_sync_header(ctx),
 
                target_component_id: self.highest_connector_id,
 
                content,
 
            };
 
            ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 
        }
 

	
 
        return None;
 
    }
 

	
 
    fn handle_global_solution_as_leader(&mut self, global_solution: GlobalSolution, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        if self.conclusion.is_some() {
 
            return None;
 
        }
 

	
 
        // Handle the global solution
 
        let mut my_final_branch_id = BranchId::new_invalid();
 
        for (connector_id, branch_id, sync_round) in global_solution.component_branches.iter().copied() {
 
            if connector_id == ctx.id {
 
                // This is our solution branch
 
                my_final_branch_id = branch_id;
 
                continue;
 
            }
 

	
 
            // Send solution message
 
            let message = SyncCompMessage {
 
                sync_header: self.create_sync_header(ctx),
 
                target_component_id: connector_id,
 
                content: SyncCompContent::GlobalSolution(global_solution.clone()),
 
            };
 
            ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 

	
 
            // Update peers as leader. Subsequent call to `end_sync` will update
 
            // the round numbers
 
            match self.peers.iter_mut().find(|v| v.id == connector_id) {
 
                Some(peer) => {
 
                    peer.expected_sync_round = sync_round;
 
                },
 
                None => {
 
                    self.peers.push(Peer{
 
                        id: connector_id,
 
                        expected_sync_round: sync_round,
 
                        encountered_this_round: true,
 
                    });
 
                }
 
            }
 
        }
 

	
 
        debug_assert!(my_final_branch_id.is_valid());
 
        self.conclusion = Some(RoundConclusion::Success(my_final_branch_id));
 
        return Some(RoundConclusion::Success(my_final_branch_id));
 
    }
 

	
 
    fn handle_global_failure_as_leader(&mut self, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        debug_assert!(self.solution_combiner.failure_reported && self.solution_combiner.check_for_global_failure());
 
        if self.conclusion.is_some() {
 
            // Already sent out a failure
 
            return None;
 
        }
 

	
 
        // TODO: Performance
 
        let mut encountered = VecSet::new();
 
        for presence in &self.solution_combiner.presence {
 
            if presence.owner_a != ctx.id {
 
                // Did not add it ourselves
 
                if encountered.push(presence.owner_a) {
 
                    // Not yet sent a message
 
                    let message = SyncCompMessage{
 
                        sync_header: self.create_sync_header(ctx),
 
                        target_component_id: presence.owner_a,
 
                        content: SyncCompContent::GlobalFailure,
 
                    };
 
                    ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 
                }
 
            }
 

	
 
            if let Some(owner_b) = presence.owner_b {
 
                if owner_b != ctx.id {
 
                    if encountered.push(owner_b) {
 
                        let message = SyncCompMessage{
 
                            sync_header: self.create_sync_header(ctx),
 
                            target_component_id: owner_b,
 
                            content: SyncCompContent::GlobalFailure,
 
                        };
 
                        ctx.submit_message(Message::SyncComp(message)).unwrap();
 
                    }
 
                }
 
            }
 
        }
 

	
 
        println!("DEBUGERINO: Leader entering error state, we need to wait on {:?}", encountered.iter().map(|v| v.index).collect::<Vec<_>>());
 
        self.conclusion = Some(RoundConclusion::Failure);
 
        if encountered.is_empty() {
 
            // We don't have to wait on Acks
 
            return Some(RoundConclusion::Failure);
 
        } else {
 
            self.ack_remaining = encountered.len() as u32;
 
            return None;
 
        }
 
    }
 

	
 
    fn initiate_sync_failure(&mut self, ctx: &mut ComponentCtx) -> Option<RoundConclusion> {
 
        debug_assert!(self.is_in_sync());
 

	
 
        // Notify leader of our channels and the fact that we just failed
 
        let channel_mapping = &self.branch_annotations[0].channel_mapping;
 
        let mut channel_presence = Vec::with_capacity(channel_mapping.len());
 
        for mapping in channel_mapping {
 
            let port = ctx.get_port_by_channel_id(mapping.channel_id).unwrap();
 
            channel_presence.push(LocalChannelPresence{
 
                channel_id: mapping.channel_id,
 
                is_closed: port.state == PortState::Closed,
 
            });
 
        }
 
        let maybe_already = self.send_to_leader_or_handle_as_leader(SyncCompContent::Presence(ComponentPresence{
 
            component_id: ctx.id,
 
            channels: channel_presence,
 
        }), ctx);
 

	
 
        if self.handled_wave {
 
            // Someone (or us) has already initiated a sync failure.
 
            return maybe_already;
 
        }
 

	
 
        let maybe_conclusion = self.send_to_leader_or_handle_as_leader(SyncCompContent::LocalFailure, ctx);
 
        debug_assert!(if maybe_already.is_some() { maybe_conclusion.is_some() } else { true });
 
        println!("DEBUG: Maybe conclusion is {:?}", maybe_conclusion);
 

	
 
        // Initiate a discovery wave so peers can do the same
 
        self.handled_wave = true;
 
        for mapping in &self.branch_annotations[0].channel_mapping {
 
            let channel_id = mapping.channel_id;
 
            let port_info = ctx.get_port_by_channel_id(channel_id).unwrap();
 
            let message = SyncPortMessage{
 
                sync_header: self.create_sync_header(ctx),
 
                source_port: port_info.self_id,
 
                target_port: port_info.peer_id,
 
                content: SyncPortContent::NotificationWave,
 
            };
 

	
 
            // Note: submitting the message might fail. But we're attempting to
 
            // handle the error anyway.
 
            // TODO: Think about this a second time: how do we make sure the
 
            //  entire network will fail if we reach this condition
 
            let _unused = ctx.submit_message(Message::SyncPort(message));
 
        }
 

	
 
        return maybe_conclusion;
 
    }
 

	
 
    #[inline]
 
    fn create_sync_header(&self, ctx: &ComponentCtx) -> SyncHeader {
 
        return SyncHeader{
 
            sending_component_id: ctx.id,
 
            highest_component_id: self.highest_connector_id,
 
            sync_round: self.sync_round,
 
        }
 
    }
 

	
 
    fn forward_local_data_to_new_leader(&mut self, ctx: &mut ComponentCtx) {
 
        debug_assert_ne!(self.highest_connector_id, ctx.id);
 

	
 
        if let Some(partial_solution) = self.solution_combiner.drain() {
 
            let message = SyncCompMessage {
 
                sync_header: self.create_sync_header(ctx),
 
                target_component_id: self.highest_connector_id,
 
                content: SyncCompContent::PartialSolution(partial_solution),
 
            };
 
            ctx.submit_message(Message::SyncComp(message)).unwrap(); // unwrap: sending to component instead of through channel
 
        }
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Solution storage and algorithms
 
// -----------------------------------------------------------------------------
 

	
 
// TODO: Remove all debug derives
 

	
 
#[derive(Debug, Clone)]
 
struct MatchedLocalSolution {
 
    final_branch_id: BranchId,
 
    channel_mapping: Vec<(ChannelId, BranchMarker)>,
 
    matches: Vec<ComponentMatches>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
struct ComponentMatches {
 
    target_id: ConnectorId,
 
    target_index: usize,
 
    match_indices: Vec<usize>, // of local solution in connector
 
}
 

	
 
#[derive(Debug, Clone)]
 
struct ComponentPeer {
 
    target_id: ConnectorId,
 
    target_index: usize, // in array of global solution components
 
    involved_channels: Vec<ChannelId>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
struct ComponentLocalSolutions {
 
    component: ConnectorId,
 
    sync_round: u32,
 
    peers: Vec<ComponentPeer>,
 
    solutions: Vec<MatchedLocalSolution>,
 
    all_peers_present: bool,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct ComponentPresence {
 
    component_id: ConnectorId,
 
    channels: Vec<LocalChannelPresence>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct LocalChannelPresence {
 
    channel_id: ChannelId,
 
    is_closed: bool,
 
}
 

	
 
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 
enum PresenceState {
 
    OnePresent, // one component reported the channel being open
 
    BothPresent, // two components reported the channel being open
 
    Closed, // one component reported the channel being closed
 
}
 

	
 
/// Record to hold channel state during the error-resolving mode of the leader.
 
/// This is used to determine when the sync region has grown to its largest
 
/// size. The structure is eventually consistent in the sense that a component
 
/// might initially presume a channel is open, only to figure out later it is
 
/// actually closed.
 
#[derive(Debug, Clone)]
 
struct ChannelPresence {
 
    owner_a: ConnectorId,
 
    owner_b: Option<ConnectorId>,
 
    id: ChannelId,
 
    state: PresenceState,
 
}
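// Editorial note (inferred from the doc comment above): the failing sync
// region is presumably considered fully discovered once every recorded channel
// is either `Closed` or `BothPresent`; the actual decision is made by
// `check_for_global_failure`.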
 

	
 
// TODO: Flatten? Flatten. Flatten everything.
 
#[derive(Debug)]
 
pub(crate) struct SolutionCombiner {
 
    local: Vec<ComponentLocalSolutions>, // used for finding solution
 
    presence: Vec<ChannelPresence>, // used to detect all channels present in case of failure
 
    failure_reported: bool,
 
}
 

	
 
struct CheckEntry {
 
    component_index: usize,         // component index in combiner's vector
 
    solution_index: usize,          // solution entry in the above component entry
 
    parent_entry_index: usize,      // parent that caused the creation of this checking entry
 
    match_index_in_parent: usize,   // index in the matches array of the parent
 
    solution_index_in_parent: usize,// index in the solution array of the match entry in the parent
 
}
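// Editorial note: these fields drive the depth-first search in
// `check_for_global_solution`; the stack holds one tentative (component,
// solution) pick per region member, while the parent/match/solution indices
// record where the search can continue with an alternative pick.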
 

	
 
enum LeaderConclusion {
 
    Solution(GlobalSolution),
 
    Failure,
 
}
 

	
 
impl SolutionCombiner {
 
    fn new() -> Self {
 
        return Self{
 
            local: Vec::new(),
 
            presence: Vec::new(),
 
            failure_reported: false,
 
        };
 
    }
 

	
 
    /// Adds a new local solution to the global solution storage. Will check the
 
    /// new local solution for matches against already stored local solutions
 
    /// of peer connectors.
 
    fn add_solution_and_check_for_global_solution(&mut self, solution: LocalSolution) -> Option<GlobalSolution> {
 
        let component_id = solution.component;
 
        let sync_round = solution.sync_round_number;
 
        let solution = MatchedLocalSolution{
 
            final_branch_id: solution.final_branch_id,
 
            channel_mapping: solution.port_mapping,
 
            matches: Vec::new(),
 
        };
 

	
 
        // Create an entry for the solution for the particular component
 
        let component_exists = self.local.iter_mut()
 
            .enumerate()
 
            .find(|(_, v)| v.component == component_id);
 
        let (component_index, solution_index, new_component) = match component_exists {
 
            Some((component_index, storage)) => {
 
                // Entry for component exists, so add to solutions
 
                let solution_index = storage.solutions.len();
 
                storage.solutions.push(solution);
 

	
 
                (component_index, solution_index, false)
 
            }
 
            None => {
 
                // Entry for component does not exist yet
 
                let component_index = self.local.len();
 
                self.local.push(ComponentLocalSolutions{
 
                    component: component_id,
 
                    sync_round,
 
                    peers: Vec::new(),
 
                    solutions: vec![solution],
 
                    all_peers_present: false,
 
                });
 
                (component_index, 0, true)
 
            }
 
        };
 

	
 
        // If this is a solution of a component that is new to us, then we check
 
        // in the stored solutions which other components are peers of the new
 
        // one.
 
        if new_component {
 
            let cur_ports = &self.local[component_index].solutions[0].channel_mapping;
 
            let mut component_peers = Vec::new();
 

	
 
            // Find the matching components
 
            for (other_index, other_component) in self.local.iter().enumerate() {
 
                if other_index == component_index {
 
                    // Don't match against ourselves
 
                    continue;
 
                }
 

	
 
                let mut matching_channels = Vec::new();
 
                for (cur_channel_id, _) in cur_ports {
 
                    for (other_channel_id, _) in &other_component.solutions[0].channel_mapping {
 
                        if cur_channel_id == other_channel_id {
 
                            // We have a shared port
 
                            matching_channels.push(*cur_channel_id);
 
                        }
 
                    }
 
                }
 

	
 
                if !matching_channels.is_empty() {
 
                    // We share some ports
 
                    component_peers.push(ComponentPeer{
 
                        target_id: other_component.component,
 
                        target_index: other_index,
 
                        involved_channels: matching_channels,
 
                    });
 
                }
 
            }
 

	
 
            let mut num_ports_in_peers = 0;
 
            for peer in &component_peers {
 
                num_ports_in_peers += peer.involved_channels.len();
 
            }
 

	
 
            if num_ports_in_peers == cur_ports.len() {
 
                // Newly added component has all required peers present
 
                self.local[component_index].all_peers_present = true;
 
            }
 

	
 
            // Add the found component pairing entries to the solution entries
 
            // for the two involved components
 
            for component_match in component_peers {
 
                // Check the other component for having all peers present
 
                let mut num_ports_in_peers = component_match.involved_channels.len();
 
                let other_component = &mut self.local[component_match.target_index];
 
                for existing_peer in &other_component.peers {
 
                    num_ports_in_peers += existing_peer.involved_channels.len();
 
                }
 

	
 
                if num_ports_in_peers == other_component.solutions[0].channel_mapping.len() {
 
                    other_component.all_peers_present = true;
 
                }
 

	
 
                other_component.peers.push(ComponentPeer{
 
                    target_id: component_id,
 
                    target_index: component_index,
 
                    involved_channels: component_match.involved_channels.clone(),
 
                });
 

	
 
                let new_component = &mut self.local[component_index];
 
                new_component.peers.push(component_match);
 
            }
 
        }
 

	
 
        // We're now sure that we know which other components the currently
 
        // considered component is linked up to. Now we need to check those
 
        // entries (if any) to see if any pair of local solutions match
 
        let mut new_component_matches = Vec::new();
 
        let cur_component = &self.local[component_index];
 
        let cur_solution = &cur_component.solutions[solution_index];
 

	
 
        for peer in &cur_component.peers {
 
            let mut new_solution_matches = Vec::new();
 

	
 
            let other_component = &self.local[peer.target_index];
 
            for (other_solution_index, other_solution) in other_component.solutions.iter().enumerate() {
 
                // Check the port mappings between the pair of solutions.
 
                let mut all_matched = true;
 

	
 
                'mapping_check_loop: for (cur_port, cur_branch) in &cur_solution.channel_mapping {
 
                    for (other_port, other_branch) in &other_solution.channel_mapping {
 
                        if cur_port == other_port {
 
                            if cur_branch == other_branch {
 
                                // Same port mapping, go to next port
 
                                break;
 
                            } else {
 
                                // Different port mapping, not a match
 
                                all_matched = false;
 
                                break 'mapping_check_loop;
 
                            }
 
                        }
 
                    }
 
                }
 

	
 
                if !all_matched {
 
                    continue;
 
                }
 

	
 
                // Port mapping between the component pair is the same, so they
 
                // have agreeable local solutions
 
                new_solution_matches.push(other_solution_index);
 
            }
 

	
 
            new_component_matches.push(ComponentMatches{
 
                target_id: peer.target_id,
 
                target_index: peer.target_index,
 
                match_indices: new_solution_matches,
 
            });
 
        }
 

	
 
        // And now that we have the new solution-to-solution matches, we need to
 
        // add those in the appropriate storage.
 
        for new_component_match in new_component_matches {
 
            let other_component = &mut self.local[new_component_match.target_index];
 

	
 
            for other_solution_index in new_component_match.match_indices.iter().copied() {
 
                let other_solution = &mut other_component.solutions[other_solution_index];
 

	
 
                // Add a completely new entry for the component, or add it to
 
                // the existing component entry's matches
 
                match other_solution.matches.iter_mut()
 
                    .find(|v| v.target_id == component_id)
 
                {
 
                    Some(other_match) => {
 
                        other_match.match_indices.push(solution_index);
 
                    },
 
                    None => {
 
                        other_solution.matches.push(ComponentMatches{
 
                            target_id: component_id,
 
                            target_index: component_index,
 
                            match_indices: vec![solution_index],
 
                        })
 
                    }
 
                }
 
            }
 

	
 
            let cur_component = &mut self.local[component_index];
 
            let cur_solution = &mut cur_component.solutions[solution_index];
 

	
 
            match cur_solution.matches.iter_mut()
 
                .find(|v| v.target_id == new_component_match.target_id)
 
            {
 
                Some(other_match) => {
 
                    // Already have an entry
 
                    debug_assert_eq!(other_match.target_index, new_component_match.target_index);
 
                    other_match.match_indices.extend(&new_component_match.match_indices);
 
                },
 
                None => {
 
                    // Create a new entry
 
                    cur_solution.matches.push(new_component_match);
 
                }
 
            }
 
        }
 

	
 
        return self.check_for_global_solution(component_index, solution_index);
 
    }
 

	
 
    fn add_presence_and_check_for_global_failure(&mut self, component_id: ConnectorId, channels: &[LocalChannelPresence]) -> bool {
 
        for entry in channels {
 
            let mut found = false;
 

	
 
            for existing in &mut self.presence {
 
                if existing.id == entry.channel_id {
 
                    // Same entry. We only update the state if the second
 
                    // component, owning the other end of the channel, reports in, or if
 
                    // a component is telling us that the channel is (now)
 
                    // closed.
 
                    if entry.is_closed {
 
                        existing.state = PresenceState::Closed;
 
                    } else if component_id != existing.owner_a && existing.state != PresenceState::Closed {
 
                        existing.state = PresenceState::BothPresent;
 
                    }
 

	
 
                    if existing.owner_a != component_id {
 
                        existing.owner_b = Some(component_id);
 
                    }
 

	
 
                    found = true;
 
                    break;
 
                }
 
            }
 

	
 
            if !found {
 
                self.presence.push(ChannelPresence{
 
                    owner_a: component_id,
 
                    owner_b: None,
 
                    id: entry.channel_id,
 
                    state: if entry.is_closed { PresenceState::Closed } else { PresenceState::OnePresent },
 
                });
 
            }
 
        }
 

	
 
        println!("DEBUGGERINO Presence is now:\n{:#?}", self.presence);
 

	
 
        return self.check_for_global_failure();
 
    }
 

	
 
    fn mark_failure_and_check_for_global_failure(&mut self) -> bool {
 
        self.failure_reported = true;
 
        return self.check_for_global_failure();
 
    }
 

	
 
    /// Checks if, starting at the provided local solution, a global solution
 
    /// can be formed.
 
    // TODO: At some point, check if divide and conquer is faster?
 
    fn check_for_global_solution(&self, initial_component_index: usize, initial_solution_index: usize) -> Option<GlobalSolution> {
 
        // A cheap test that is necessary (but not sufficient) for a global
        // solution to exist
 
        for component in &self.local {
 
            if !component.all_peers_present {
 
                return None;
 
            }
 
        }
 

	
 
        // Construct initial entry on stack
 
        let mut stack = Vec::with_capacity(self.local.len());
 
        stack.push(CheckEntry{
 
            component_index: initial_component_index,
 
            solution_index: initial_solution_index,
 
            parent_entry_index: 0,
 
            match_index_in_parent: 0,
 
            solution_index_in_parent: 0,
 
        });
 

	
 
        'check_last_stack: loop {
 
            let cur_index = stack.len() - 1;
 
            let cur_entry = &stack[cur_index];
 

	
 
            // Check if the current entry's solution is compatible with all other entries on the stack
 
            let mut all_match = true;
 
            'check_against_existing: for prev_index in 0..cur_index {
 
                let prev_entry = &stack[prev_index];
 
                let prev_component = &self.local[prev_entry.component_index];
 
                let prev_solution = &prev_component.solutions[prev_entry.solution_index];
 

	
 
                for prev_matching_component in &prev_solution.matches {
 
                    if prev_matching_component.target_index == cur_entry.component_index {
 
                        // Previous entry has shared ports with the current
 
                        // entry, so see if we have a composable pair of
 
                        // solutions.
 
                        if !prev_matching_component.match_indices.contains(&cur_entry.solution_index) {
 
                            all_match = false;
 
                            break 'check_against_existing;
 
                        }
 
                    }
 
                }
 
            }
 

	
 
            if all_match {
 
                // All components matched until now.
 
                if stack.len() == self.local.len() {
 
                    // We have found a global solution
 
                    break 'check_last_stack;
 
                }
 

	
 
                // Not all components are present yet; look for a new one that
                // has not been added to the stack yet.
 
                for (parent_index, parent_entry) in stack.iter().enumerate() {
 
                    let parent_component = &self.local[parent_entry.component_index];
 
                    let parent_solution = &parent_component.solutions[parent_entry.solution_index];
 

	
 
                    for (peer_index, peer_component) in parent_solution.matches.iter().enumerate() {
 
                        if peer_component.match_indices.is_empty() {
 
                            continue;
 
                        }
 

	
 
                        let already_added = stack.iter().any(|v| v.component_index == peer_component.target_index);
 
                        if !already_added {
 
                            // New component to try
 
                            stack.push(CheckEntry{
 
                                component_index: peer_component.target_index,
 
                                solution_index: peer_component.match_indices[0],
 
                                parent_entry_index: parent_index,
 
                                match_index_in_parent: peer_index,
 
                                solution_index_in_parent: 0,
 
                            });
 
                            continue 'check_last_stack;
 
                        }
 
                    }
 
                }
 

	
 
                // Cannot find a peer to add. This is possible if, for example,
 
                // we have a component A which has the only connection to
 
                // component B. And B has sent a local solution saying it is
 
                // finished, but the last data message has not yet arrived at A.
 

	
 
                // In any case, we fall out of the if statement and treat the
                // failure to find a new connector as a cue to try a new
                // permutation of the possible local solutions.
 
            }
 

	
 
            // Either the currently considered local solution is inconsistent
 
            // with other local solutions, or we cannot find a new component to
 
            // add. This is where we perform backtracking as long as needed to
 
            // try a new solution.
 
            while stack.len() > 1 {
 
                // Check if our parent has another solution we can try
 
                let cur_index = stack.len() - 1;
 
                let cur_entry = &stack[cur_index];
 

	
 
                let parent_entry = &stack[cur_entry.parent_entry_index];
 
                let parent_component = &self.local[parent_entry.component_index];
 
                let parent_solution = &parent_component.solutions[parent_entry.solution_index];
 

	
 
                let match_component = &parent_solution.matches[cur_entry.match_index_in_parent];
 
                debug_assert!(match_component.target_index == cur_entry.component_index);
 
                let new_solution_index_in_parent = cur_entry.solution_index_in_parent + 1;
 

	
 
                if new_solution_index_in_parent < match_component.match_indices.len() {
 
                    // We can still try a new one
 
                    let new_solution_index = match_component.match_indices[new_solution_index_in_parent];
 
                    let cur_entry = &mut stack[cur_index];
 
                    cur_entry.solution_index_in_parent = new_solution_index_in_parent;
 
                    cur_entry.solution_index = new_solution_index;
 
                    continue 'check_last_stack;
 
                } else {
 
                    // We're out of options here. So pop an entry, then in
 
                    // the next iteration of this backtracking loop we try
 
                    // to increment that solution
 
                    stack.pop();
 
                }
 
            }
 

	
 
            // Stack length is 1, hence we're back at our initial solution.
 
            // Since that doesn't yield a global solution, we simply:
 
            return None;
 
        }
 

	
 
        // Constructing the representation of the global solution
 
        debug_assert_eq!(stack.len(), self.local.len());
 
        let mut final_branches = Vec::with_capacity(stack.len());
 
        for entry in &stack {
 
            let component = &self.local[entry.component_index];
 
            let solution = &component.solutions[entry.solution_index];
 
            final_branches.push((component.component, solution.final_branch_id, component.sync_round));
 
        }
 

	
 
        // Just debugging here, TODO: @remove
 
        let mut total_num_channels = 0;
 
        for entry in &stack {
 
            let component = &self.local[entry.component_index];
 
            total_num_channels += component.solutions[0].channel_mapping.len();
 
        }
 

	
 
        total_num_channels /= 2;
 
        let mut final_mapping = Vec::with_capacity(total_num_channels);
 
        let mut total_num_checked = 0;
 

	
 
        for entry in &stack {
 
            let component = &self.local[entry.component_index];
 
            let solution = &component.solutions[entry.solution_index];
 

	
 
            for (channel_id, branch_id) in solution.channel_mapping.iter().copied() {
 
                match final_mapping.iter().find(|(v, _)| *v == channel_id) {
 
                    Some((_, encountered_branch_id)) => {
 
                        debug_assert_eq!(*encountered_branch_id, branch_id);
 
                        total_num_checked += 1;
 
                    },
 
                    None => {
 
                        final_mapping.push((channel_id, branch_id));
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_assert_eq!(total_num_checked, total_num_channels);
 

	
 
        return Some(GlobalSolution{
 
            component_branches: final_branches,
 
            channel_mapping: final_mapping,
 
        });
 
    }
 

	
 
    /// Checks if all preconditions for global sync failure have been met
 
    fn check_for_global_failure(&self) -> bool {
 
        if !self.failure_reported {
 
            return false;
 
        }
 

	
 
        // A failure has been reported. If all components are present then we
        // may emit the global failure broadcast, so check that all of them are
        // present.
 
        let mut all_present = true;
 
        for presence in &self.presence {
 
            if presence.state == PresenceState::OnePresent {
 
                all_present = false;
 
                break;
 
            }
 
        }
 

	
 
        return all_present; // && failure_reported, which is checked above
 
    }
 

	
 
    /// Turns the entire (partially resolved) global solution into a structure
 
    /// that can be forwarded to a new parent. The new parent may then merge
    /// in the information obtained so far.
 
    fn drain(&mut self) -> Option<SolutionCombiner> {
 
        if self.local.is_empty() && self.presence.is_empty() && !self.failure_reported {
 
            return None;
 
        }
 

	
 
        let result = SolutionCombiner{
 
            local: self.local.clone(),
 
            presence: self.presence.clone(),
 
            failure_reported: self.failure_reported,
 
        };
 

	
 
        self.local.clear();
 
        self.presence.clear();
 
        self.failure_reported = false;
 
        return Some(result);
 
    }
 

	
 
    // TODO: Entire routine is quite wasteful. Combine instead of doing all work
 
    //  again.
 
    fn combine(&mut self, combiner: SolutionCombiner) -> Option<LeaderConclusion> {
 
        self.failure_reported = self.failure_reported || combiner.failure_reported;
 

	
 
        // Handle local solutions
 
        if self.local.is_empty() {
 
            // Trivial case
 
            self.local = combiner.local;
 
        } else {
 
            for local in combiner.local {
 
                for matched in local.solutions {
 
                    let local_solution = LocalSolution{
 
                        component: local.component,
 
                        sync_round_number: local.sync_round,
 
                        final_branch_id: matched.final_branch_id,
 
                        port_mapping: matched.channel_mapping,
 
                    };
 
                    let maybe_solution = self.add_solution_and_check_for_global_solution(local_solution);
 
                    if let Some(global_solution) = maybe_solution {
 
                        return Some(LeaderConclusion::Solution(global_solution));
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Handle channel presence
 
        println!("DEBUGERINO: Presence before joining is {:#?}", &self.presence);
 
        if self.presence.is_empty() {
 
            // Trivial case
 
            self.presence = combiner.presence;
 
            println!("DEBUGERINO: Trivial merging")
 
        } else {
 
            for presence in combiner.presence {
 
                match self.presence.iter_mut().find(|v| v.id == presence.id) {
 
                    Some(entry) => {
 
                        // Combine entries: a Closed state takes priority, then
                        // BothPresent, and two OnePresent entries are merged
                        // into BothPresent.
 
                        if entry.state == PresenceState::Closed {
 
                            // Do nothing
 
                        } else if presence.state == PresenceState::Closed {
 
                            entry.owner_a = presence.owner_a;
 
                            entry.owner_b = presence.owner_b;
 
                            entry.state = PresenceState::Closed;
 
                        } else if entry.state == PresenceState::BothPresent {
 
                            // Again: do nothing
 
                        } else if presence.state == PresenceState::BothPresent {
 
                            entry.owner_a = presence.owner_a;
 
                            entry.owner_b = presence.owner_b;
 
                            entry.state = PresenceState::BothPresent;
 
                        } else {
 
                            // Both have one presence, combine into both present
 
                            debug_assert!(entry.state == PresenceState::OnePresent && presence.state == PresenceState::OnePresent);
 
                            entry.owner_b = Some(presence.owner_a);
 
                            entry.state = PresenceState::BothPresent;
 
                        }
 
                    },
 
                    None => {
 
                        self.presence.push(presence);
 
                    }
 
                }
 
            }
 
            println!("DEBUGERINO: Presence after joining is {:#?}", &self.presence);
 

	
 
            // After adding everything we might immediately have met the
            // conditions for a global failure
 
            if self.check_for_global_failure() {
 
                println!("DEBUG: Returning immediate failure?");
 
                return Some(LeaderConclusion::Failure);
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    fn clear(&mut self) {
 
        self.local.clear();
 
        self.presence.clear();
 
        self.failure_reported = false;
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Generic Helpers
 
// -----------------------------------------------------------------------------
 

	
 
/// Recursively goes through the value group, attempting to find ports.
 
/// Duplicates will only be added once.
 
pub(crate) fn find_ports_in_value_group(value_group: &ValueGroup, ports: &mut Vec<PortIdLocal>) {
 
    // Helper to check a value for a port and recurse if needed.
 
    use crate::protocol::eval::Value;
 

	
 
    fn find_port_in_value(group: &ValueGroup, value: &Value, ports: &mut Vec<PortIdLocal>) {
 
        match value {
 
            Value::Input(port_id) | Value::Output(port_id) => {
 
                // This is an actual port
 
                let cur_port = PortIdLocal::new(port_id.0.u32_suffix);
 
                let cur_port = PortIdLocal::new(port_id.id);
 
                for prev_port in ports.iter() {
 
                    if *prev_port == cur_port {
 
                        // Already added
 
                        return;
 
                    }
 
                }
 

	
 
                ports.push(cur_port);
 
            },
 
            Value::Array(heap_pos) |
 
            Value::Message(heap_pos) |
 
            Value::String(heap_pos) |
 
            Value::Struct(heap_pos) |
 
            Value::Union(_, heap_pos) => {
 
                // Reference to some dynamic thing which might contain ports,
 
                // so recurse
 
                let heap_region = &group.regions[*heap_pos as usize];
 
                for embedded_value in heap_region {
 
                    find_port_in_value(group, embedded_value, ports);
 
                }
 
            },
 
            _ => {}, // values we don't care about
 
        }
 
    }
 

	
 
    // Clear the ports, then scan all the available values
 
    ports.clear();
 
    for value in &value_group.values {
 
        find_port_in_value(value_group, value, ports);
 
    }
 
}
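// Illustrative call site (a sketch; the surrounding component logic is assumed
// and not part of this module):
//
//     let mut ports = Vec::new();
//     find_ports_in_value_group(&value_group, &mut ports);
//     // `ports` now holds every PortIdLocal found in the group exactly once.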
 
\ No newline at end of file
src/runtime/inbox.rs
Show inline comments
 
file renamed from src/runtime2/inbox.rs to src/runtime/inbox.rs
 
use std::sync::Mutex;
 
use std::collections::VecDeque;
 

	
 
use crate::protocol::eval::ValueGroup;
 
use crate::runtime2::consensus::{ComponentPresence, SolutionCombiner};
 
use crate::runtime2::port::ChannelId;
 
use crate::runtime::consensus::{ComponentPresence, SolutionCombiner};
 
use crate::runtime::port::ChannelId;
 

	
 
use super::ConnectorId;
 
use super::consensus::{GlobalSolution, LocalSolution};
 
use super::port::PortIdLocal;
 

	
 
#[derive(Debug, Copy, Clone)]
 
pub(crate) struct ChannelAnnotation {
 
    pub channel_id: ChannelId,
 
    pub registered_id: Option<BranchMarker>,
 
    pub expected_firing: Option<bool>,
 
}
 

	
 
/// Marker for a branch in a port mapping. A marker is, like a branch ID, a
 
/// unique identifier for a branch, but differs in that a branch only has one
 
/// branch ID, yet might have multiple associated markers (e.g. one branch
/// performing a `put` three times will generate three markers).
 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 
pub(crate) struct BranchMarker{
 
    marker: u32,
 
}
 

	
 
impl BranchMarker {
 
    #[inline]
 
    pub(crate) fn new(marker: u32) -> Self {
 
        debug_assert!(marker != 0);
 
        return Self{ marker };
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn new_invalid() -> Self {
 
        return Self{ marker: 0 }
 
    }
 
}
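// A minimal sketch of how markers relate to branches (the numeric values are
// illustrative, not taken from the runtime): a branch that performs three
// `put` operations ends up associated with three distinct markers, e.g.
// `BranchMarker::new(1)`, `BranchMarker::new(2)` and `BranchMarker::new(3)`,
// while the branch itself keeps a single branch ID.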
 

	
 
/// The header added by the synchronization algorithm to all of its messages.
 
#[derive(Debug, Clone, Copy)]
 
pub(crate) struct SyncHeader {
 
    pub sending_component_id: ConnectorId,
 
    pub highest_component_id: ConnectorId,
 
    pub sync_round: u32,
 
}
 

	
 
/// The header added to data messages
 
#[derive(Debug, Clone)]
 
pub(crate) struct DataHeader {
 
    pub expected_mapping: Vec<ChannelAnnotation>,
 
    pub sending_port: PortIdLocal,
 
    pub target_port: PortIdLocal,
 
    pub new_mapping: BranchMarker,
 
}
 

	
 
/// A data message is a message that is intended for the receiver's PDL code,
 
/// but will also be handled by the consensus algorithm
 
#[derive(Debug, Clone)]
 
pub(crate) struct DataMessage {
 
    pub sync_header: SyncHeader,
 
    pub data_header: DataHeader,
 
    pub content: ValueGroup,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum SyncCompContent {
 
    LocalFailure, // notifying leader that a component has failed (e.g. due to a timeout)
 
    LocalSolution(LocalSolution), // sending a local solution to the leader
 
    PartialSolution(SolutionCombiner), // when new leader is detected, forward all local results
 
    GlobalSolution(GlobalSolution), // broadcasting to everyone
 
    GlobalFailure, // broadcasting to everyone
 
    AckFailure, // acknowledgement of failure to leader
 
    Notification, // just a notification (so purpose of message is to send the SyncHeader)
 
    Presence(ComponentPresence), // notifying leader of component presence (needed to ensure failing a round involves all components in a sync round)
 
}
 

	
 
/// A sync message is a message that is intended only for the consensus
 
/// algorithm. The message goes directly to a component.
 
#[derive(Debug)]
 
pub(crate) struct SyncCompMessage {
 
    pub sync_header: SyncHeader,
 
    pub target_component_id: ConnectorId,
 
    pub content: SyncCompContent,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum SyncPortContent {
 
    SilentPortNotification,
 
    NotificationWave,
 
}
 

	
 
/// A sync message intended for the consensus algorithm. This message does not
 
/// go to a component, but through a channel (and results in potential
 
/// rerouting) because we're not sure about the ID of the component that holds
 
/// the other end of the channel.
 
#[derive(Debug)]
 
pub(crate) struct SyncPortMessage {
 
    pub sync_header: SyncHeader,
 
    pub source_port: PortIdLocal,
 
    pub target_port: PortIdLocal,
 
    pub content: SyncPortContent,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum SyncControlContent {
 
    ChannelIsClosed(PortIdLocal), // contains port that is owned by the recipient of the message
 
}
 

	
 
/// A sync control message: originating from the scheduler, but intended for the
 
/// current sync round of the recipient. Every kind of consensus algorithm must
 
/// be able to handle such a message.
 
#[derive(Debug)]
 
pub(crate) struct SyncControlMessage {
 
    // For now these control messages are only aimed at components; this might
    // change in the future. Currently we respond to messages from components
    // that have, by sending that message, published their ID.
 
    pub in_response_to_sync_round: u32,
 
    pub target_component_id: ConnectorId,
 
    pub content: SyncControlContent,
 
}
 

	
 
/// A control message is a message intended for the scheduler that is executing
 
/// a component.
 
#[derive(Debug)]
 
pub(crate) struct ControlMessage {
 
    pub id: u32, // generic identifier, used to match request to response
 
    pub sending_component_id: ConnectorId,
 
    pub content: ControlContent,
 
}
 

	
 
#[derive(Debug)]
 
pub(crate) enum ControlContent {
 
    PortPeerChanged(PortIdLocal, ConnectorId),
 
    CloseChannel(PortIdLocal),
 
    Ack,
 
    Ping,
 
}
 

	
 
/// Combination of data message and control messages.
 
#[derive(Debug)]
 
pub(crate) enum Message {
 
    Data(DataMessage),
 
    SyncComp(SyncCompMessage),
 
    SyncPort(SyncPortMessage),
 
    SyncControl(SyncControlMessage),
 
    Control(ControlMessage),
 
}
 

	
 
impl Message {
 
    /// If the message is sent through a particular channel, then this function
 
    /// returns the port through which the message was sent.
 
    pub(crate) fn source_port(&self) -> Option<PortIdLocal> {
 
        // Currently only data and sync-port messages have a source port
 
        match self {
 
            Message::Data(message) => return Some(message.data_header.sending_port),
 
            Message::SyncPort(message) => return Some(message.source_port),
 
            Message::SyncComp(_) => return None,
 
            Message::SyncControl(_) => return None,
 
            Message::Control(_) => return None,
 
        }
 
    }
 

	
 
    /// If the message is sent through a particular channel, then this function
 
    /// returns the target port through which the message was sent.
 
    pub(crate) fn target_port(&self) -> Option<PortIdLocal> {
 
        match self {
 
            Message::Data(message) => return Some(message.data_header.target_port),
 
            Message::SyncPort(message) => return Some(message.target_port),
 
            Message::SyncComp(_) => return None,
 
            Message::SyncControl(_) => return None,
 
            Message::Control(message) => {
 
                match &message.content {
 
                    ControlContent::PortPeerChanged(port_id, _) => return Some(*port_id),
 
                    ControlContent::CloseChannel(port_id) => return Some(*port_id),
 
                    ControlContent::Ping => return None,
 
                    ControlContent::Ack => return None,
 
                }
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn source_component(&self) -> Option<ConnectorId> {
 
        match self {
 
            Message::Data(message) => Some(message.sync_header.sending_component_id),
 
            Message::SyncPort(message) => Some(message.sync_header.sending_component_id),
 
            Message::SyncComp(message) => Some(message.sync_header.sending_component_id),
 
            Message::SyncControl(_) => None,
 
            Message::Control(message) => Some(message.sending_component_id)
 
        }
 
    }
 

	
 
    pub(crate) fn as_data(&self) -> &DataMessage {
 
        match self {
 
            Message::Data(v) => v,
 
            _ => unreachable!(),
 
        }
 
    }
 
}
 

	
 
/// The public inbox of a connector. The thread running the connector that owns
 
/// this inbox may retrieve messages from it. Non-owning threads may only put new
 
/// messages inside of it.
 
// TODO: @Optimize, lazy concurrency. Probably ringbuffer with read/write heads.
 
//  Should behave as a MPSC queue.
 
pub struct PublicInbox {
 
    messages: Mutex<VecDeque<Message>>,
 
}
 

	
 
impl PublicInbox {
 
    pub fn new() -> Self {
 
        Self{
 
            messages: Mutex::new(VecDeque::new()),
 
        }
 
    }
 

	
 
    pub(crate) fn insert_message(&self, message: Message) {
 
        let mut lock = self.messages.lock().unwrap();
 
        lock.push_back(message);
 
    }
 

	
 
    pub(crate) fn take_message(&self) -> Option<Message> {
 
        let mut lock = self.messages.lock().unwrap();
 
        return lock.pop_front();
 
    }
 

	
 
    pub fn is_empty(&self) -> bool {
 
        let lock = self.messages.lock().unwrap();
 
        return lock.is_empty();
 
    }
 

	
 
    pub fn clear(&self) {
 
        let mut lock = self.messages.lock().unwrap();
 
        lock.clear();
 
    }
 
}
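// Minimal usage sketch (the producing and consuming sides are assumed to run
// on different threads; `message` is a hypothetical value):
//
//     inbox.insert_message(message);            // any thread may push
//     while let Some(message) = inbox.take_message() {
//         // only the owning thread is expected to drain the inbox
//     }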
 
\ No newline at end of file
src/runtime/mod.rs
Show inline comments
 
/// cbindgen:ignore
 
mod communication;
 
/// cbindgen:ignore
 
mod endpoints;
 
pub mod error;
 
/// cbindgen:ignore
 
mod logging;
 
/// cbindgen:ignore
 
mod setup;
 

	
 
#[cfg(test)]
 
mod tests;
 

	
 
use crate::common::*;
 
use error::*;
 
use mio::net::UdpSocket;
 

	
 
/// The interface between the user's application and a communication session,
 
/// in which the application plays the part of a (native) component. This structure provides the application
 
/// with functionality available to all components: the ability to add new channels (port pairs), and to
 
/// instantiate new components whose definitions appear in the connector's configured protocol
 
/// description. Native components have the additional ability to add `dangling' ports backed by local/remote
 
/// IP addresses, to be coupled with a counterpart once the connector's setup is completed by `connect`.
 
/// This allows sets of applications to cooperate in constructing shared sessions that span the network.
 
#[derive(Debug)]
 
pub struct Connector {
 
    unphased: ConnectorUnphased,
 
    phased: ConnectorPhased,
 
}
 
// Structure of module
 

	
 
/// Characterizes a type which can write lines of logging text.
 
/// The implementations provided in the `logging` module are likely to be sufficient,
 
/// but for added flexibility, users are able to implement their own loggers for use
 
/// by connectors.
 
pub trait Logger: Debug + Send + Sync {
 
    fn line_writer(&mut self) -> Option<&mut dyn std::io::Write>;
 
}
 
mod branch;
 
mod native;
 
mod port;
 
mod scheduler;
 
mod consensus;
 
mod inbox;
 

	
 
/// A logger that appends the logged strings to a growing byte buffer
 
#[derive(Debug)]
 
pub struct VecLogger(ConnectorId, Vec<u8>);
 
#[cfg(test)] mod tests;
 
mod connector;
 

	
 
/// A trivial logger that always returns None, such that no logging information is ever written.
 
#[derive(Debug)]
 
pub struct DummyLogger;
 
// Imports
 

	
 
/// A logger that writes the logged lines to a given file.
 
#[derive(Debug)]
 
pub struct FileLogger(ConnectorId, std::fs::File);
 

	
 
// Interface between protocol state and the connector runtime BEFORE all components
 
// have begun their branching speculation. See ComponentState::nonsync_run.
 
pub(crate) struct NonsyncProtoContext<'a> {
 
    ips: &'a mut IdAndPortState,
 
    logger: &'a mut dyn Logger,
 
    unrun_components: &'a mut Vec<(ComponentId, ComponentState)>, // lives for Nonsync phase
 
    proto_component_id: ComponentId,                              // KEY in id->component map
 
}
 
use std::collections::VecDeque;
 
use std::sync::{Arc, Condvar, Mutex, RwLock};
 
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
 
use std::thread::{self, JoinHandle};
 

	
 
// Interface between protocol state and the connector runtime AFTER all components
 
// have begun their branching speculation. See ComponentState::sync_run.
 
pub(crate) struct SyncProtoContext<'a> {
 
    rctx: &'a RoundCtx,
 
    branch_inner: &'a mut ProtoComponentBranchInner, // sub-structure of component branch
 
    predicate: &'a Predicate,                        // KEY in pred->branch map
 
}
 
use crate::collections::RawVec;
 
use crate::ProtocolDescription;
 

	
 
// The data coupled with a particular protocol component branch, but crucially omitting
 
// the `ComponentState` such that this may be passed by reference to the state with separate
 
// access control.
 
#[derive(Default, Debug, Clone)]
 
struct ProtoComponentBranchInner {
 
    did_put_or_get: HashSet<PortId>,
 
    inbox: HashMap<PortId, Payload>,
 
}
 
use connector::{ConnectorPDL, ConnectorPublic, ConnectorScheduling};
 
use scheduler::{Scheduler, ComponentCtx, SchedulerCtx, ControlMessageHandler};
 
use native::{Connector, ConnectorApplication, ApplicationInterface};
 
use inbox::Message;
 
use port::{ChannelId, Port, PortState};
 

	
 
// A speculative variable that lives for the duration of the synchronous round.
 
// Each is assigned a value in domain `SpecVal`.
 
#[derive(
 
    Copy, Clone, Eq, PartialEq, Ord, Hash, PartialOrd, serde::Serialize, serde::Deserialize,
 
)]
 
struct SpecVar(PortId);
 

	
 
// The codomain of SpecVal. Has two associated constants for values FIRING and SILENT,
 
// but may also enumerate many more values to facilitate finer-grained nondeterministic branching.
 
#[derive(
 
    Copy, Clone, Eq, PartialEq, Ord, Hash, PartialOrd, serde::Serialize, serde::Deserialize,
 
)]
 
struct SpecVal(u16);
 

	
 
// Data associated with a successful synchronous round, retained afterwards such that the
 
// native component can freely reflect on how it went, reading the messages received at their
 
// inputs, and reflecting on which of their connector's synchronous batches succeeded.
 
/// A kind of token that, once obtained, allows mutable access to a connector.
 
/// We're trying to use move semantics as much as possible: the owner of this
 
/// key is the only one that may execute the connector's code.
 
#[derive(Debug)]
 
struct RoundEndedNative {
 
    batch_index: usize,
 
    gotten: HashMap<PortId, Payload>,
 
pub(crate) struct ConnectorKey {
 
    pub index: u32, // of connector
 
    pub generation: u32,
 
}
 

	
 
// Implementation of a set in terms of a vector (optimized for reading, not writing)
 
#[derive(Default)]
 
struct VecSet<T: std::cmp::Ord> {
 
    // invariant: ordered, deduplicated
 
    vec: Vec<T>,
 
}
 
impl ConnectorKey {
 
    /// Downcasts the `ConnectorKey` type, which can be used to obtain mutable
 
    /// access, to a "regular ID" which can be used to obtain immutable access.
 
    #[inline]
 
    pub fn downcast(&self) -> ConnectorId {
 
        return ConnectorId{
 
            index: self.index,
 
            generation: self.generation,
 
        };
 
    }
 

	
 
// Allows a connector to remember how to forward payloads towards the component that
 
// owns their destination port. `LocalComponent` corresponds with messages for components
 
// managed by the connector itself (hinting for it to look it up in a local structure),
 
// whereas the other variants direct the connector to forward the messages over the network.
 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 
enum Route {
 
    LocalComponent,
 
    NetEndpoint { index: usize },
 
    UdpEndpoint { index: usize },
 
    /// Turns the `ConnectorId` into a `ConnectorKey`, marked as unsafe as it
 
    /// bypasses the type-enforced `ConnectorKey`/`ConnectorId` system
 
    #[inline]
 
    pub unsafe fn from_id(id: ConnectorId) -> ConnectorKey {
 
        return ConnectorKey{
 
            index: id.index,
 
            generation: id.generation,
 
        };
 
    }
 
}
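// A sketch of the intended key/ID discipline (storage details are assumed, not
// shown here): the scheduler that holds a `ConnectorKey` has exclusive, mutable
// access to the component and hands out shared `ConnectorId`s via
// `key.downcast()`; going the other way with `ConnectorKey::from_id(id)` is
// `unsafe` precisely because it sidesteps that exclusivity guarantee.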
 

	
 
// The outcome of a synchronous round, representing the distributed consensus.
 
// In the success case, the attached predicate encodes a row in the session's trace table.
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
enum Decision {
 
    Failure, // some connector timed out!
 
    Success(Predicate),
 
/// A kind of token that allows shared access to a connector. Multiple threads
 
/// may hold this at the same time.
 
#[derive(Debug, Copy, Clone)]
 
pub struct ConnectorId{
 
    pub index: u32,
 
    pub generation: u32,
 
}
 

	
 
// The type of control messages exchanged between connectors over the network
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum Msg {
 
    SetupMsg(SetupMsg),
 
    CommMsg(CommMsg),
 
impl PartialEq for ConnectorId {
 
    fn eq(&self, other: &Self) -> bool {
 
        return self.index.eq(&other.index);
 
    }
 
}
 

	
 
// Control messages exchanged during the setup phase only
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum SetupMsg {
 
    MyPortInfo(MyPortInfo),
 
    LeaderWave { wave_leader: ConnectorId },
 
    LeaderAnnounce { tree_leader: ConnectorId },
 
    YouAreMyParent,
 
}
 
impl Eq for ConnectorId{}
 

	
 
// Control message particular to the communication phase.
 
// as such, it's annotated with a round_index
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct CommMsg {
 
    round_index: usize,
 
    contents: CommMsgContents,
 
impl PartialOrd for ConnectorId{
 
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
 
        return self.index.partial_cmp(&other.index)
 
    }
 
}
 

	
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum CommMsgContents {
 
    SendPayload(SendPayloadMsg),
 
    CommCtrl(CommCtrlMsg),
 
impl Ord for ConnectorId{
 
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
 
        return self.partial_cmp(other).unwrap();
 
    }
 
}
 

	
 
// Connector <-> connector control messages for use in the communication phase
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum CommCtrlMsg {
 
    Suggest { suggestion: Decision }, // child->parent
 
    Announce { decision: Decision },  // parent->child
 
}
 
impl ConnectorId {
 
    // TODO: Like the other `new_invalid`, maybe remove
 
    #[inline]
 
    pub fn new_invalid() -> ConnectorId {
 
        return ConnectorId {
 
            index: u32::MAX,
 
            generation: 0,
 
        };
 
    }
 

	
 
// Speculative payload message, communicating the value for the given
 
// port's message predecated on the given speculative variable assignments.
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct SendPayloadMsg {
 
    predicate: Predicate,
 
    payload: Payload,
 
    #[inline]
 
    pub(crate) fn is_valid(&self) -> bool {
 
        return self.index != u32::MAX;
 
    }
 
}
 

	
 
// Return result of `Predicate::assignment_union`, communicating the contents
 
// of the predicate which represents the (consistent) union of their mappings,
 
// if it exists (no variable mapped distinctly by the input predicates)
 
#[derive(Debug, PartialEq)]
 
enum AssignmentUnionResult {
 
    FormerNotLatter,
 
    LatterNotFormer,
 
    Equivalent,
 
    New(Predicate),
 
    Nonexistant,
 
// TODO: Change this, I hate this. But I also don't want to put `public` and
 
//  `router` of `ScheduledConnector` back into `Connector`. The reason I don't
 
//  want `Box<dyn Connector>` everywhere is because of the v-table overhead. But
 
//  to truly design this properly I need some benchmarks.
 
pub(crate) enum ConnectorVariant {
 
    UserDefined(ConnectorPDL),
 
    Native(Box<dyn Connector>),
 
}
 

	
 
// One of two endpoints for a control channel with a connector on either end.
 
// The underlying transport is TCP, so we use an inbox buffer to allow
 
// discrete payload receipt.
 
struct NetEndpoint {
 
    inbox: Vec<u8>,
 
    stream: TcpStream,
 
impl Connector for ConnectorVariant {
 
    fn run(&mut self, scheduler_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        match self {
 
            ConnectorVariant::UserDefined(c) => c.run(scheduler_ctx, comp_ctx),
 
            ConnectorVariant::Native(c) => c.run(scheduler_ctx, comp_ctx),
 
        }
 
    }
 
}
 

	
 
// Datastructure used during the setup phase representing a NetEndpoint TO BE SETUP
 
#[derive(Debug, Clone)]
 
struct NetEndpointSetup {
 
    getter_for_incoming: PortId,
 
    sock_addr: SocketAddr,
 
    endpoint_polarity: EndpointPolarity,
 
}
 
pub(crate) struct ScheduledConnector {
 
    pub connector: ConnectorVariant, // access by connector
 
    pub ctx: ComponentCtx,
 
    pub public: ConnectorPublic, // accessible by all schedulers and connectors
 
    pub router: ControlMessageHandler,
 
    pub shutting_down: bool,
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Runtime
 
// -----------------------------------------------------------------------------
 

	
 
/// Externally facing runtime.
 
pub struct Runtime {
 
    inner: Arc<RuntimeInner>,
 
}
 

	
 
impl Runtime {
 
    pub fn new(num_threads: u32, protocol_description: ProtocolDescription) -> Runtime {
 
        // Setup global state
 
        assert!(num_threads > 0, "need a thread to run connectors");
 
        let runtime_inner = Arc::new(RuntimeInner{
 
            protocol_description,
 
            port_counter: AtomicU32::new(0),
 
            connectors: RwLock::new(ConnectorStore::with_capacity(32)),
 
            connector_queue: Mutex::new(VecDeque::with_capacity(32)),
 
            schedulers: Mutex::new(Vec::new()),
 
            scheduler_notifier: Condvar::new(),
 
            active_connectors: AtomicU32::new(0),
 
            active_interfaces: AtomicU32::new(1), // this `Runtime` instance
 
            should_exit: AtomicBool::new(false),
 
        });
 

	
 
        // Launch threads
 
        {
 
            let mut schedulers = Vec::with_capacity(num_threads as usize);
 
            for thread_index in 0..num_threads {
 
                let cloned_runtime_inner = runtime_inner.clone();
 
                let thread = thread::Builder::new()
 
                    .name(format!("thread-{}", thread_index))
 
                    .spawn(move || {
 
                        let mut scheduler = Scheduler::new(cloned_runtime_inner, thread_index);
 
                        scheduler.run();
 
                    })
 
                    .unwrap();
 

	
 
                schedulers.push(thread);
 
            }
 

	
 
// Datastructure used during the setup phase representing a UdpEndpoint TO BE SETUP
 
#[derive(Debug, Clone)]
 
struct UdpEndpointSetup {
 
    getter_for_incoming: PortId,
 
    local_addr: SocketAddr,
 
    peer_addr: SocketAddr,
 
}
 
            let mut lock = runtime_inner.schedulers.lock().unwrap();
 
            *lock = schedulers;
 
        }
 

	
 
// NetEndpoint annotated with the ID of the port that receives payload
 
// messages received through the endpoint. This approach assumes that NetEndpoints
 
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
 
// As a result, the messages themselves don't need to carry the PortID with them.
 
#[derive(Debug)]
 
struct NetEndpointExt {
 
    net_endpoint: NetEndpoint,
 
    getter_for_incoming: PortId,
 
}
 
        // Return runtime
 
        return Runtime{ inner: runtime_inner };
 
    }
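    // Illustrative construction (the protocol description is assumed to be
    // produced elsewhere in the crate, e.g. by the protocol compiler):
    //
    //     let runtime = Runtime::new(4, protocol_description);
    //     let interface = runtime.create_interface();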
 

	
 
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
 
// described in the literature.
 
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
 
// It acts as a native component by managing a (speculative) set of payload messages (an outbox,
 
//  protecting the peer on the other side of the network).
 
#[derive(Debug)]
 
struct UdpEndpointExt {
 
    sock: UdpSocket, // already bound and connected
 
    received_this_round: bool,
 
    outgoing_payloads: HashMap<Predicate, Payload>,
 
    getter_for_incoming: PortId,
 
}
 
    /// Returns a new interface through which channels and connectors can be
 
    /// created.
 
    pub fn create_interface(&self) -> ApplicationInterface {
 
        self.inner.increment_active_interfaces();
 
        let (connector, mut interface) = ConnectorApplication::new(self.inner.clone());
 
        let connector_key = self.inner.create_interface_component(connector);
 
        interface.set_connector_id(connector_key.downcast());
 

	
 
// Meta-data for the connector: its role in the consensus tree.
 
#[derive(Debug)]
 
struct Neighborhood {
 
    parent: Option<usize>,
 
    children: VecSet<usize>,
 
        // Note that we're not scheduling. That is done by the interface in case
 
        // it is actually needed.
 
        return interface;
 
    }
 
}
 

	
 
// Manages the connector's ID, and manages allocations for connector/port IDs.
 
#[derive(Debug, Clone)]
 
struct IdManager {
 
    connector_id: ConnectorId,
 
    port_suffix_stream: U32Stream,
 
    component_suffix_stream: U32Stream,
 
impl Drop for Runtime {
 
    fn drop(&mut self) {
 
        self.inner.decrement_active_interfaces();
 
        let mut lock = self.inner.schedulers.lock().unwrap();
 
        for handle in lock.drain(..) {
 
            handle.join().unwrap();
 
        }
 
    }
 
}
 

	
 
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
 
struct IoByteBuffer {
 
    byte_vec: Vec<u8>,
 
}
 
// -----------------------------------------------------------------------------
 
// RuntimeInner
 
// -----------------------------------------------------------------------------
 

	
 
pub(crate) struct RuntimeInner {
 
    // Protocol
 
    pub(crate) protocol_description: ProtocolDescription,
 
    // Regular counter for port IDs
 
    port_counter: AtomicU32,
 
    // Storage of connectors and the work queue
 
    connectors: RwLock<ConnectorStore>,
 
    connector_queue: Mutex<VecDeque<ConnectorKey>>,
 
    schedulers: Mutex<Vec<JoinHandle<()>>>,
 
    // Conditions to determine whether the runtime can exit
 
    scheduler_notifier: Condvar,  // coupled to mutex on `connector_queue`.
 
    // TODO: Figure out if we can simply merge the counters?
 
    active_connectors: AtomicU32, // active connectors (if sleeping, then still considered active)
 
    active_interfaces: AtomicU32, // active API interfaces that can add connectors/channels
 
    should_exit: AtomicBool,
 
}
 

	
 
impl RuntimeInner {
 
    // --- Managing the components queued for execution
 

	
 
    /// Wait until there is a connector to run. If there is one, then `Some`
 
    /// will be returned. If there is no more work, then `None` will be
 
    /// returned.
 
    pub(crate) fn wait_for_work(&self) -> Option<ConnectorKey> {
 
        let mut lock = self.connector_queue.lock().unwrap();
 
        while lock.is_empty() && !self.should_exit.load(Ordering::Acquire) {
 
            lock = self.scheduler_notifier.wait(lock).unwrap();
 
        }
 

	
 
// A generator of speculative variables. Created on-demand during the synchronous round
 
// by the IdManager.
 
#[derive(Debug)]
 
struct SpecVarStream {
 
    connector_id: ConnectorId,
 
    port_suffix_stream: U32Stream,
 
}
 
        return lock.pop_front();
 
    }
 

	
 
    pub(crate) fn push_work(&self, key: ConnectorKey) {
 
        let mut lock = self.connector_queue.lock().unwrap();
 
        lock.push_back(key);
 
        self.scheduler_notifier.notify_one();
 
    }
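    // A sketch of the scheduler loop these two methods are built for (the
    // actual loop lives in the `scheduler` module and may differ in detail):
    //
    //     while let Some(key) = runtime_inner.wait_for_work() {
    //         // run the component behind `key`; re-queue it with `push_work`
    //         // if it still has work to do
    //     }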
 

	
 
    // --- Creating/using ports
 

	
 
    /// Creates a new port pair. Note that these are stored globally like the
 
    /// connectors are. Ports stored by components belong to those components.
 
    pub(crate) fn create_channel(&self, creating_connector: ConnectorId) -> (Port, Port) {
 
        use port::{PortIdLocal, PortKind};
 

	
 
        let getter_id = self.port_counter.fetch_add(2, Ordering::SeqCst);
 
        let channel_id = ChannelId::new(getter_id);
 
        let putter_id = PortIdLocal::new(getter_id + 1);
 
        let getter_id = PortIdLocal::new(getter_id);
 

	
 
        let getter_port = Port{
 
            self_id: getter_id,
 
            peer_id: putter_id,
 
            channel_id,
 
            kind: PortKind::Getter,
 
            state: PortState::Open,
 
            peer_connector: creating_connector,
 
        };
 
        let putter_port = Port{
 
            self_id: putter_id,
 
            peer_id: getter_id,
 
            channel_id,
 
            kind: PortKind::Putter,
 
            state: PortState::Open,
 
            peer_connector: creating_connector,
 
        };
 

	
 
        return (getter_port, putter_port);
 
    }
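    // Illustrative numbering (assuming the port counter currently reads 4): the
    // getter port receives local ID 4, the putter port local ID 5, and both
    // share `ChannelId::new(4)`. Both ports initially list the creating
    // connector as their peer connector; presumably this is updated when a port
    // is handed to another component.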
 

	
 
    /// Sends a message directly (without going through the port) to a
 
    /// component. This is slightly less efficient than sending over a port, but
 
    /// might be preferable for some algorithms. If the component was sleeping
 
    /// then it is scheduled for execution.
 
    pub(crate) fn send_message_maybe_destroyed(&self, target_id: ConnectorId, message: Message) -> bool {
 
        let target = {
 
            let mut lock = self.connectors.read().unwrap();
 
            lock.get(target_id.index)
 
        };
 

	
 
        // Do a CAS on the number of users. Most common case the component is
 
        // alive and we're the only one sending the message. Note that if we
 
        // finish this block, we're sure that no-one has set the `num_users`
 
        // value to 0. This is essential! When at 0, the component is added to
 
        // the freelist and the generation counter will be incremented.
 
        let mut cur_num_users = 1;
 
        while let Err(old_num_users) = target.num_users.compare_exchange(cur_num_users, cur_num_users + 1, Ordering::SeqCst, Ordering::Acquire) {
 
            if old_num_users == 0 {
 
                // Cannot send message. Whatever the component state is
 
                // (destroyed, at a different generation number, busy being
 
                // destroyed, etc.) we cannot send the message and will not
 
                // modify the component
 
                return false;
 
            }
 

	
 
// Manages the messy state of the various endpoints, pollers, buffers, etc.
 
#[derive(Debug)]
 
struct EndpointManager {
 
    // invariants:
 
    // 1. net and udp endpoints are registered with poll with tokens computed with TargetToken::into
 
    // 2. Events is empty
 
    poll: Poll,
 
    events: Events,
 
    delayed_messages: Vec<(usize, Msg)>,
 
    undelayed_messages: Vec<(usize, Msg)>, // ready to yield
 
    net_endpoint_store: EndpointStore<NetEndpointExt>,
 
    udp_endpoint_store: EndpointStore<UdpEndpointExt>,
 
    io_byte_buffer: IoByteBuffer,
 
}
 
            cur_num_users = old_num_users;
 
        }
 

	
 
// A storage of endpoints, which keeps track of which components have raised
 
// an event during poll(), signifying that they need to be checked for new incoming data
 
#[derive(Debug)]
 
struct EndpointStore<T> {
 
    endpoint_exts: Vec<T>,
 
    polled_undrained: VecSet<usize>,
 
}
 
        // We incremented the counter. But we might still be at the wrong
 
        // generation number. The generation number is a monotonically
 
        // increasing value. Since it only increases when someone gets the
 
        // `num_users` counter to 0, we can simply load the generation number.
 
        let generation = target.generation.load(Ordering::Acquire);
 
        if generation != target_id.generation {
 
            // We're at the wrong generation, so we cannot send the message.
 
            // However, since we incremented the `num_users` counter, the moment
 
            // we decrement it we might be the ones that are supposed to handle
            // the destruction of the component. Since all users of the
            // component do an increment followed by a decrement, we can simply
            // do a `fetch_sub`.
 
            let old_num_users = target.num_users.fetch_sub(1, Ordering::SeqCst);
 
            if old_num_users == 1 {
 
                // We're the one that got the counter to 0, so we're the ones
 
                // that are supposed to handle component exit
 
                self.finish_component_destruction(target_id);
 
            }
 

	
 
// The information associated with a port identifier, designed for local storage.
 
#[derive(Clone, Debug)]
 
struct PortInfo {
 
    owner: ComponentId,
 
    peer: Option<PortId>,
 
    polarity: Polarity,
 
    route: Route,
 
}
 
            return false;
 
        }
 

	
 
// Similar to `PortInfo`, but designed for communication during the setup procedure.
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct MyPortInfo {
 
    polarity: Polarity,
 
    port: PortId,
 
    owner: ComponentId,
 
}
 
        // The generation is correct, and since we incremented the `num_users`
 
        // counter we're now sure that we can send the message and it will be
 
        // handled by the receiver
 
        target.connector.public.inbox.insert_message(message);
 

	
 
        // Finally, do the same as above: decrement the number of users; if it
        // gets to 0 we're the ones who should handle the exit condition.
 
        let old_num_users = target.num_users.fetch_sub(1, Ordering::SeqCst);
 
        if old_num_users == 1 {
 
            // We're allowed to destroy the component.
 
            self.finish_component_destruction(target_id);
 
        } else {
 
            // Message is sent. If the component is sleeping, then we're sure
 
            // it is not scheduled and it has not initiated the destruction of
 
            // the component (because `initiate_component_destruction` does not
            // set `sleeping` to true).
 
            // So we can safely schedule it.
 
            let should_wake_up = target.connector.public.sleeping
 
                .compare_exchange(true, false, Ordering::SeqCst, Ordering::Acquire)
 
                .is_ok();
 

	
 
            if should_wake_up {
 
                let key = unsafe{ ConnectorKey::from_id(target_id) };
 
                self.push_work(key);
 
            }
 
        }
 

	
 
// Newtype around port info map, allowing the implementation of some
 
// useful methods
 
#[derive(Default, Debug, Clone)]
 
struct PortInfoMap {
 
    // invariant: self.invariant_preserved()
 
    // `owned` is redundant information, allowing for fast lookup
 
    // of a component's owned ports (which occurs during the sync round a lot)
 
    map: HashMap<PortId, PortInfo>,
 
    owned: HashMap<ComponentId, HashSet<PortId>>,
 
}
 
        return true
 
    }
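    // Summary of the liveness protocol above (a descriptive note, no new
    // behaviour): `num_users` acts as a reference count that is never
    // resurrected once it reaches zero, and `generation` only advances when the
    // count hits zero. Incrementing the count before loading the generation
    // therefore guarantees that a matching generation means the component is
    // still the instance the caller intended to reach, and whoever brings the
    // count back to zero is responsible for finishing the destruction.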
 

	
 
// A convenient substructure for containing port info and the ID manager.
 
// Houses the bulk of the connector's persistent state between rounds.
 
// It turns out several situations require access to both things.
 
#[derive(Debug, Clone)]
 
struct IdAndPortState {
 
    port_info: PortInfoMap,
 
    id_manager: IdManager,
 
}
 
    /// Sends a message to a particular component, assumed to occur over a port.
 
    /// If the component happened to be sleeping then it will be scheduled for
 
    /// execution. Because of the port management system we may assume that
 
    /// we're always accessing the component at the right generation number.
 
    pub(crate) fn send_message_assumed_alive(&self, target_id: ConnectorId, message: Message) {
 
        let target = {
 
            let lock = self.connectors.read().unwrap();
 
            let entry = lock.get(target_id.index);
 
            debug_assert_eq!(entry.generation.load(Ordering::Acquire), target_id.generation);
 
            &mut entry.connector.public
 
        };
 

	
 
// A component's setup-phase-specific data
 
#[derive(Debug)]
 
struct ConnectorCommunication {
 
    round_index: usize,
 
    endpoint_manager: EndpointManager,
 
    neighborhood: Neighborhood,
 
    native_batches: Vec<NativeBatch>,
 
    round_result: Result<Option<RoundEndedNative>, SyncError>,
 
}
 
        target.inbox.insert_message(message);
 

	
 
// A component's data common to both setup and communication phases
 
#[derive(Debug)]
 
struct ConnectorUnphased {
 
    proto_description: Arc<ProtocolDescription>,
 
    proto_components: HashMap<ComponentId, ComponentState>,
 
    logger: Box<dyn Logger>,
 
    ips: IdAndPortState,
 
    native_component_id: ComponentId,
 
}
 
        let should_wake_up = target.sleeping
 
            .compare_exchange(true, false, Ordering::SeqCst, Ordering::Acquire)
 
            .is_ok();
 

	
 
// A connector's phase-specific data
 
#[derive(Debug)]
 
enum ConnectorPhased {
 
    Setup(Box<ConnectorSetup>),
 
    Communication(Box<ConnectorCommunication>),
 
}
 
        if should_wake_up {
 
            let key = unsafe{ ConnectorKey::from_id(target_id) };
 
            self.push_work(key);
 
        }
 
    }
 

	
 
// A connector's setup-phase-specific data
 
#[derive(Debug)]
 
struct ConnectorSetup {
 
    net_endpoint_setups: Vec<NetEndpointSetup>,
 
    udp_endpoint_setups: Vec<UdpEndpointSetup>,
 
}
 
    // --- Creating/retrieving/destroying components
 

	
 
// A newtype wrapper for a map from speculative variable to speculative value
 
// A missing mapping corresponds with "unspecified".
 
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
 
struct Predicate {
 
    assigned: BTreeMap<SpecVar, SpecVal>,
 
}
 
    /// Creates an initially sleeping application connector.
 
    fn create_interface_component(&self, component: ConnectorApplication) -> ConnectorKey {
 
        // Initialize as sleeping, as it will be scheduled by the programmer.
 
        let mut lock = self.connectors.write().unwrap();
 
        let key = lock.create(ConnectorVariant::Native(Box::new(component)), true);
 

	
 
// Identifies a child of this connector in the _solution tree_.
 
// Each connector creates its own local solutions for the consensus procedure during `sync`,
 
// from the solutions of its children. Those children are either locally-managed components,
 
// (which are leaves in the solution tree), or other connectors reachable through the given
 
// network endpoint (which are internal nodes in the solution tree).
 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 
enum SubtreeId {
 
    LocalComponent(ComponentId),
 
    NetEndpoint { index: usize },
 
}
 
        self.increment_active_components();
 
        return key;
 
    }
 

	
 
// An accumulation of the connector's knowledge of all (a) the local solutions its children
 
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
 
// This structure starts off each round with an empty set, and accumulates solutions as they are found
 
// by local components, or received over the network in control messages.
 
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
 
// say that these sets GROW until the round is over, and all solutions are reset.
 
#[derive(Debug)]
 
struct SolutionStorage {
 
    // invariant: old_local U new_local solutions are those that can be created from
 
    // the UNION of one element from each set in `subtree_solution`.
 
    // invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
 
    old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
 
    new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
 
    // this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
 
    subtree_solutions: Vec<HashSet<Predicate>>,
 
    subtree_id_to_index: HashMap<SubtreeId, usize>,
 
}
 
    /// Creates a new PDL component. This function just creates the component.
 
    /// If you create it initially awake, then you must add it to the work
 
    /// queue. Other aspects of correctness (e.g. setting the initial ports)
    /// are left to the caller!
 
    pub(crate) fn create_pdl_component(&self, connector: ConnectorPDL, initially_sleeping: bool) -> ConnectorKey {
 
        // Created sleeping or awake depending on what the caller requested
 
        let key = {
 
            let mut lock = self.connectors.write().unwrap();
 
            lock.create(ConnectorVariant::UserDefined(connector), initially_sleeping)
 
        };
 

	
 
// Stores the transient data of a synchronous round.
 
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
 
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
 
// and can be undone if the round fails.
 
struct RoundCtx {
 
    solution_storage: SolutionStorage,
 
    spec_var_stream: SpecVarStream,
 
    payload_inbox: Vec<(PortId, SendPayloadMsg)>,
 
    deadline: Option<Instant>,
 
    ips: IdAndPortState,
 
}
 
        self.increment_active_components();
 
        return key;
 
    }
 

	
 
// A trait intended to limit the access of the ConnectorUnphased structure
 
// such that we don't accidentally modify any important component/port data
 
// while the results of the round are undecided. Why? Any actions during Connector::sync
 
// are _speculative_ until the round is decided, and we need a safe way of rolling
 
// back any changes.
 
trait CuUndecided {
 
    fn logger(&mut self) -> &mut dyn Logger;
 
    fn proto_description(&self) -> &ProtocolDescription;
 
    fn native_component_id(&self) -> ComponentId;
 
    fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
 
    fn logger_and_protocol_components(
 
        &mut self,
 
    ) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
 
}
 
    /// Retrieve private access to the component through its key.
 
    #[inline]
 
    pub(crate) fn get_component_private(&self, connector_key: &ConnectorKey) -> &'static mut ScheduledConnector {
 
        let entry = {
 
            let lock = self.connectors.read().unwrap();
 
            lock.get(connector_key.index)
 
        };
 

	
 
        debug_assert_eq!(entry.generation.load(Ordering::Acquire), connector_key.generation, "private access to {:?}", connector_key);
 
        return &mut entry.connector;
 
    }
 

	
 
    // --- Managing component destruction
 

	
 
    /// Start component destruction, may only be done by the scheduler that is
 
    /// executing the component. This might not actually destroy the component,
 
    /// since other components might be sending it messages.
 
    fn initiate_component_destruction(&self, connector_key: ConnectorKey) {
 
        // Most of the time no-one will be sending messages, so try
 
        // immediate destruction
 
        let mut lock = self.connectors.write().unwrap();
 
        let entry = lock.get(connector_key.index);
 
        debug_assert_eq!(entry.generation.load(Ordering::Acquire), connector_key.generation);
 
        debug_assert_eq!(entry.connector.public.sleeping.load(Ordering::Acquire), false); // not sleeping: caller is executing this component
 
        let old_num_users = entry.num_users.fetch_sub(1, Ordering::SeqCst);
 
        if old_num_users == 1 {
 
            // We just brought the number of users down to 0. Destroy the
 
            // component
 
            entry.connector.public.inbox.clear();
 
            entry.generation.fetch_add(1, Ordering::SeqCst);
 
            lock.destroy(connector_key);
 
            self.decrement_active_components();
 
        }
 
    }
 

	
 
// Represents a set of synchronous port operations that the native component
 
// has described as an "option" for completing during the synchronous rounds.
 
// Operations contained here succeed together or not at all.
 
// A native with N=2+ batches is expressing an N-way nondeterministic choice
 
#[derive(Debug, Default)]
 
struct NativeBatch {
 
    // invariant: putters' and getters' polarities respected
 
    to_put: HashMap<PortId, Payload>,
 
    to_get: HashSet<PortId>,
 
}
 
    fn finish_component_destruction(&self, connector_id: ConnectorId) {
 
        let mut lock = self.connectors.write().unwrap();
 
        let entry = lock.get(connector_id.index);
 
        debug_assert_eq!(entry.num_users.load(Ordering::Acquire), 0);
 
        let _old_generation = entry.generation.fetch_add(1, Ordering::SeqCst);
 
        debug_assert_eq!(_old_generation, connector_id.generation);
 

	
 
// Parallels a mio::Token type, but more clearly communicates
 
// the way it identifies the evented structure it corresponds to.
 
// See runtime/setup for methods converting between TokenTarget and mio::Token
 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
 
enum TokenTarget {
 
    NetEndpoint { index: usize },
 
    UdpEndpoint { index: usize },
 
}
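
// A plausible sketch of the TokenTarget <-> token conversion (an assumption for
// illustration only; the actual scheme lives in runtime/setup and uses mio::Token).
// Here net endpoints take token values below UDP_OFFSET and udp endpoints take
// values at or above it; a local Token stand-in keeps the example self-contained.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct SketchToken(usize);

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum SketchTokenTarget {
    NetEndpoint { index: usize },
    UdpEndpoint { index: usize },
}

const UDP_OFFSET: usize = 1 << 16; // assumed split point, not the real constant

impl From<SketchTokenTarget> for SketchToken {
    fn from(target: SketchTokenTarget) -> SketchToken {
        match target {
            SketchTokenTarget::NetEndpoint { index } => SketchToken(index),
            SketchTokenTarget::UdpEndpoint { index } => SketchToken(UDP_OFFSET + index),
        }
    }
}

impl From<SketchToken> for SketchTokenTarget {
    fn from(token: SketchToken) -> SketchTokenTarget {
        if token.0 < UDP_OFFSET {
            SketchTokenTarget::NetEndpoint { index: token.0 }
        } else {
            SketchTokenTarget::UdpEndpoint { index: token.0 - UDP_OFFSET }
        }
    }
}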
 
        // TODO: In the future we should not only clear out the inbox, but send
 
        //  messages back to the senders indicating the messages did not arrive.
 
        entry.connector.public.inbox.clear();
 

	
 
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
 
// such that it can know when to continue polling, and when to block.
 
enum CommRecvOk {
 
    TimeoutWithoutNew,
 
    NewPayloadMsgs,
 
    NewControlMsg { net_index: usize, msg: CommCtrlMsg },
 
}
 
////////////////
 
fn err_would_block(err: &std::io::Error) -> bool {
 
    err.kind() == std::io::ErrorKind::WouldBlock
 
}
 
impl<T: std::cmp::Ord> VecSet<T> {
 
    fn new(mut vec: Vec<T>) -> Self {
 
        // establish the invariant
 
        vec.sort();
 
        vec.dedup();
 
        Self { vec }
 
        // Invariant of only one thread being able to handle the internals of
 
        // component is preserved by the fact that only one thread can decrement
 
        // `num_users` to 0.
 
        lock.destroy(unsafe{ ConnectorKey::from_id(connector_id) });
 
        self.decrement_active_components();
 
    }
 
    fn contains(&self, element: &T) -> bool {
 
        self.vec.binary_search(element).is_ok()
 
    }
 
    // Inserts the given element. Returns true if it was newly inserted (false if it was already present).
 
    fn insert(&mut self, element: T) -> bool {
 
        match self.vec.binary_search(&element) {
 
            Ok(_) => false,
 
            Err(index) => {
 
                self.vec.insert(index, element);
 
                true
 

	
 
    // --- Managing exit condition
 

	
 
    #[inline]
 
    pub(crate) fn increment_active_interfaces(&self) {
 
        let _old_num = self.active_interfaces.fetch_add(1, Ordering::SeqCst);
 
        debug_assert_ne!(_old_num, 0); // once it hits 0, it stays zero
 
    }
 

	
 
    pub(crate) fn decrement_active_interfaces(&self) {
 
        let old_num = self.active_interfaces.fetch_sub(1, Ordering::SeqCst);
 
        debug_assert!(old_num > 0);
 
        if old_num == 1 { // such that active interfaces is now 0
 
            let num_connectors = self.active_connectors.load(Ordering::Acquire);
 
            if num_connectors == 0 {
 
                self.signal_for_shutdown();
 
            }
 
        }
 
    }
 
    fn iter(&self) -> std::slice::Iter<T> {
 
        self.vec.iter()
 
    }
 
    fn pop(&mut self) -> Option<T> {
 
        self.vec.pop()
 
    }
 
}
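
// A standalone sketch of the sorted-Vec-as-set idea behind VecSet: sort + dedup
// establishes the invariant, after which membership testing and ordered insertion
// are each a single binary search.
fn sketch_vec_as_set() {
    let mut set: Vec<u32> = vec![3, 1, 3, 2];
    set.sort();
    set.dedup(); // invariant established: sorted, no duplicates
    assert_eq!(set, vec![1, 2, 3]);

    // membership test
    assert!(set.binary_search(&2).is_ok());

    // insert-if-absent, preserving sorted order
    if let Err(index) = set.binary_search(&4) {
        set.insert(index, 4);
    }
    assert_eq!(set, vec![1, 2, 3, 4]);
}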
 
impl PortInfoMap {
 
    fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
 
        self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
 
    }
 
    fn spec_var_for(&self, port: PortId) -> SpecVar {
 
        // Every port maps to a speculative variable
 
        // Two distinct ports map to the same variable
 
        // IFF they are two ends of the same logical channel.
 
        let info = self.map.get(&port).unwrap();
 
        SpecVar(match info.polarity {
 
            Getter => port,
 
            Putter => info.peer.unwrap(),
 
        })
 

	
 
    #[inline]
 
    fn increment_active_components(&self) {
 
        let _old_num = self.active_connectors.fetch_add(1, Ordering::SeqCst);
 
    }
 
    fn invariant_preserved(&self) -> bool {
 
        // for every port P with some owner O,
 
        // P is in O's owned set
 
        for (port, info) in self.map.iter() {
 
            match self.owned.get(&info.owner) {
 
                Some(set) if set.contains(port) => {}
 
                _ => {
 
                    println!("{:#?}\n WITH port {:?}", self, port);
 
                    return false;
 
                }
 
            }
 
        }
 
        // for every port P owned by every owner O,
 
        // P's owner is O
 
        for (&owner, set) in self.owned.iter() {
 
            for port in set {
 
                match self.map.get(port) {
 
                    Some(info) if info.owner == owner => {}
 
                    _ => {
 
                        println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
 
                        return false;
 
                    }
 
                }
 

	
 
    fn decrement_active_components(&self) {
 
        let old_num = self.active_connectors.fetch_sub(1, Ordering::SeqCst);
 
        debug_assert!(old_num > 0);
 
        if old_num == 1 { // such that we have no more active connectors (for now!)
 
            let num_interfaces = self.active_interfaces.load(Ordering::Acquire);
 
            if num_interfaces == 0 {
 
                self.signal_for_shutdown();
 
            }
 
        }
 
        true
 
    }
 
}
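
// A small sketch of the spec-var rule in `spec_var_for` above, with u32 port ids and a
// simplified (non-optional) peer field: a getter maps to its own id and a putter maps
// to its peer's id, so both ends of one logical channel share the same variable.
#[derive(Clone, Copy, PartialEq, Eq)]
enum SketchPolarity { Putter, Getter }

struct SketchPortInfo { polarity: SketchPolarity, peer: u32 }

fn sketch_spec_var(port: u32, info: &SketchPortInfo) -> u32 {
    match info.polarity {
        SketchPolarity::Getter => port,
        SketchPolarity::Putter => info.peer,
    }
}

fn sketch_channel_shares_spec_var() {
    // ports 10 (putter) and 11 (getter) are the two ends of one channel
    let putter = SketchPortInfo { polarity: SketchPolarity::Putter, peer: 11 };
    let getter = SketchPortInfo { polarity: SketchPolarity::Getter, peer: 10 };
    assert_eq!(sketch_spec_var(10, &putter), sketch_spec_var(11, &getter));
}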
 
impl SpecVarStream {
 
    fn next(&mut self) -> SpecVar {
 
        let phantom_port: PortId =
 
            Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
 
                .into();
 
        SpecVar(phantom_port)
 
    }
 
}
 
impl IdManager {
 
    fn new(connector_id: ConnectorId) -> Self {
 
        Self {
 
            connector_id,
 
            port_suffix_stream: Default::default(),
 
            component_suffix_stream: Default::default(),
 
        }
 
    }
 
    fn new_spec_var_stream(&self) -> SpecVarStream {
 
        // Spec var stream starts where the current port_id stream ends, with a gap of SKIP_N.

        // This gap is entirely unnecessary (i.e. 0 is fine).

        // Its purpose is only to make SpecVars easier to spot in logs.
 
        // E.g. spot the spec var: { v0_0, v1_2, v1_103 }
 
        const SKIP_N: u32 = 100;
 
        let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
 
        SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
 
    }
 
    fn new_port_id(&mut self) -> PortId {
 
        Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
 
    }
 
    fn new_component_id(&mut self) -> ComponentId {
 
        Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
 
            .into()
 
    }
 
}
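
// A tiny sketch of the SKIP_N trick in `new_spec_var_stream`: the spec-var suffix stream
// is a clone of the port suffix stream advanced by 100, so port suffixes stay small while
// spec vars show up in logs as conspicuously larger numbers (e.g. v1_103).
#[derive(Clone, Default)]
struct SketchSuffixStream { counter: u32 }

impl SketchSuffixStream {
    fn next(&mut self) -> u32 {
        let v = self.counter;
        self.counter += 1;
        v
    }
    fn n_skipped(mut self, n: u32) -> Self {
        self.counter += n;
        self
    }
}

fn sketch_skip_n() {
    let mut ports = SketchSuffixStream::default();
    let _p0 = ports.next(); // 0
    let _p1 = ports.next(); // 1
    let mut spec_vars = ports.clone().n_skipped(100);
    assert_eq!(spec_vars.next(), 102); // well past any port suffix handed out so far
}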
 
impl Drop for Connector {
 
    fn drop(&mut self) {
 
        log!(self.unphased.logger(), "Connector dropping. Goodbye!");
 
    }
 
}
 
// Given a slice of ports, return the first port, if any, that occurs more than once
 
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
 
    let mut vec = Vec::with_capacity(slice.len());
 
    for port in slice.iter() {
 
        match vec.binary_search(port) {
 
            Err(index) => vec.insert(index, *port),
 
            Ok(_) => return Some(*port),
 
        }
 
    }
 
    None
 
}
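
// A usage sketch of the duplicate-detection approach above, with u32 standing in for
// PortId: the insertion-sorted scratch vector reports the first element encountered a
// second time, and None for a duplicate-free slice.
fn sketch_duplicate(slice: &[u32]) -> Option<u32> {
    let mut seen = Vec::with_capacity(slice.len());
    for value in slice.iter() {
        match seen.binary_search(value) {
            Err(index) => seen.insert(index, *value),
            Ok(_) => return Some(*value),
        }
    }
    None
}

fn sketch_duplicate_examples() {
    assert_eq!(sketch_duplicate(&[1, 2, 3]), None);
    assert_eq!(sketch_duplicate(&[1, 2, 1, 2]), Some(1)); // 1 is the first element seen twice
}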
 
impl Connector {
 
    /// Generate a random connector identifier from the system's source of randomness.
 
    pub fn random_id() -> ConnectorId {
 
        type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
 
        unsafe {
 
            let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
 
            // getrandom is the canonical crate for a small, secure rng
 
            getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
 
            // safe! representations of all valid Byte8 values are valid ConnectorId values
 
            std::mem::transmute::<_, _>(bytes.assume_init())
 
        }
 
    }
 

	
 
    /// Returns true iff the connector is in the connected state, i.e., its setup phase is complete,
 
    /// and it is ready to participate in synchronous rounds of communication.
 
    pub fn is_connected(&self) -> bool {
 
        // If designed for Rust usage, connectors would be exposed as an enum type from the start.

        // Consequently, this "phased" business would also include connector variants and this would

        // get a lot closer to the connector impl. itself.

        // Instead, the C-oriented implementation doesn't distinguish connector states as types,

        // but distinguishes them as enum variants instead.
 
        match self.phased {
 
            ConnectorPhased::Setup(..) => false,
 
            ConnectorPhased::Communication(..) => true,
 
    #[inline]
 
    fn signal_for_shutdown(&self) {
 
        debug_assert_eq!(self.active_interfaces.load(Ordering::Acquire), 0);
 
        debug_assert_eq!(self.active_connectors.load(Ordering::Acquire), 0);
 

	
 
        let _lock = self.connector_queue.lock().unwrap();
 
        let should_signal = self.should_exit
 
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::Acquire)
 
            .is_ok();
 

	
 
        if should_signal {
 
            self.scheduler_notifier.notify_all();
 
        }
 
    }
 
}
 

	
 
    /// Enables the connector's current logger to be swapped out for another
 
    pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
 
        std::mem::swap(&mut self.unphased.logger, &mut new_logger);
 
        new_logger
 
    }
 
unsafe impl Send for RuntimeInner {}
 
unsafe impl Sync for RuntimeInner {}
 

	
 
    /// Access the connector's current logger
 
    pub fn get_logger(&mut self) -> &mut dyn Logger {
 
        &mut *self.unphased.logger
 
    }
 
// -----------------------------------------------------------------------------
 
// ConnectorStore
 
// -----------------------------------------------------------------------------
 

	
 
    /// Create a new synchronous channel, returning its ends as a pair of ports,
 
    /// with polarity output, input respectively. Available during either setup/communication phase.
 
    /// # Panics
 
    /// This function panics if the connector's (large) port id space is exhausted.
 
    pub fn new_port_pair(&mut self) -> [PortId; 2] {
 
        let cu = &mut self.unphased;
 
        // adds two new associated ports, related to each other, and exposed to the native
 
        let mut new_cid = || cu.ips.id_manager.new_port_id();
 
        // allocate two fresh port identifiers
 
        let [o, i] = [new_cid(), new_cid()];
 
        // store info for each:
 
        // - they are each others' peers
 
        // - they are owned by a local component with id `cid`
 
        // - polarity putter, getter respectively
 
        cu.ips.port_info.map.insert(
 
            o,
 
            PortInfo {
 
                route: Route::LocalComponent,
 
                peer: Some(i),
 
                owner: cu.native_component_id,
 
                polarity: Putter,
 
            },
 
        );
 
        cu.ips.port_info.map.insert(
 
            i,
 
            PortInfo {
 
                route: Route::LocalComponent,
 
                peer: Some(o),
 
                owner: cu.native_component_id,
 
                polarity: Getter,
 
            },
 
        );
 
        cu.ips
 
            .port_info
 
            .owned
 
            .entry(cu.native_component_id)
 
            .or_default()
 
            .extend([o, i].iter().copied());
 

	
 
        log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
 
        [o, i]
 
    }
 
struct StoreEntry {
 
    connector: ScheduledConnector,
 
    generation: std::sync::atomic::AtomicU32,
 
    num_users: std::sync::atomic::AtomicU32,
 
}
 

	
 
    /// Instantiates a new component for the connector runtime to manage, passing

    /// the given set of ports from the interface of the native component to that of the

    /// newly created component (transferring their ownership).
 
    /// # Errors
 
    /// Error is returned if the moved ports are not owned by the native component,
 
    /// if the given component name is not defined in the connector's protocol,
 
    /// if the given sequence of ports contains a duplicate port,
 
    /// or if the component is unfit for instantiation with the given port sequence.
 
    /// # Panics
 
    /// This function panics if the connector's (large) component id space is exhausted.
 
    pub fn add_component(
 
        &mut self,
 
        module_name: &[u8],
 
        identifier: &[u8],
 
        ports: &[PortId],
 
    ) -> Result<(), AddComponentError> {
 
        // Check for error cases first before modifying `cu`
 
        use AddComponentError as Ace;
 
        let cu = &self.unphased;
 
        if let Some(port) = duplicate_port(ports) {
 
            return Err(Ace::DuplicatePort(port));
 
        }
 
        let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
 
        if expected_polarities.len() != ports.len() {
 
            return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
 
        }
 
        for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
 
            let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
 
            if info.owner != cu.native_component_id {
 
                return Err(Ace::UnknownPort(port));
 
            }
 
            if info.polarity != expected_polarity {
 
                return Err(Ace::WrongPortPolarity { port, expected_polarity });
 
            }
 
        }
 
        // No errors! Time to modify `cu`
 
        // create a new component and identifier
 
        let Connector { phased, unphased: cu } = self;
 
        let new_cid = cu.ips.id_manager.new_component_id();
 
        cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
 
        // update the ownership of moved ports
 
        for port in ports.iter() {
 
            match cu.ips.port_info.map.get_mut(port) {
 
                Some(port_info) => port_info.owner = new_cid,
 
                None => unreachable!(),
 
            }
 
        }
 
        if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
 
            set.retain(|x| !ports.contains(x));
 
        }
 
        let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
 
        if let ConnectorPhased::Communication(comm) = phased {
 
            // Preserve invariant: batches only reason about native's ports.
 
            // Remove batch puts/gets for moved ports.
 
            for batch in comm.native_batches.iter_mut() {
 
                batch.to_put.retain(|port, _| !moved_port_set.contains(port));
 
                batch.to_get.retain(|port| !moved_port_set.contains(port));
 
            }
 
        }
 
        cu.ips.port_info.owned.insert(new_cid, moved_port_set);
 
        Ok(())
 
    }
 
struct ConnectorStore {
 
    // Freelist storage of connectors. Storage should be pointer-stable as
 
    // someone might be mutating the vector while we're executing one of the
 
    // connectors.
 
    entries: RawVec<*mut StoreEntry>,
 
    free: Vec<usize>,
 
}
 
impl Predicate {
 
    #[inline]
 
    pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
 
        Self::default().inserted(k, v)
 
    }
 
    #[inline]
 
    pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
 
        self.assigned.insert(k, v);
 
        self
 
    }
 

	
 
    // Returns true if `self`'s assignments are a subset of those of `maybe_superset`
 
    pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
 
        for (var, val) in self.assigned.iter() {
 
            match maybe_superset.assigned.get(var) {
 
                Some(val2) if val2 == val => {}
 
                _ => return false, // var unmapped, or mapped differently
 
            }
 
impl ConnectorStore {
 
    fn with_capacity(capacity: usize) -> Self {
 
        Self {
 
            entries: RawVec::with_capacity(capacity),
 
            free: Vec::with_capacity(capacity),
 
        }
 
        // `maybe_superset` mirrored all my assignments!
 
        true
 
    }
 

	
 
    /// Given the two predicates {self, other}, computes the predicate whose assignments

    /// are the union of those of both (if it exists), reporting whether that union is

    /// equivalent to self, to other, to both, or to neither.
 
    fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
 
        use AssignmentUnionResult as Aur;
 
        // iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
 
        let [mut s_it, mut o_it] = [self.assigned.iter(), other.assigned.iter()];
 
        let [mut s, mut o] = [s_it.next(), o_it.next()];
 
        // populate lists of assignments in self but not other and vice versa.
 
        // do this by incrementally unfolding the iterators, keeping an eye
 
        // on the ordering between the head elements [s, o].
 
        // whenever s<o, other is certainly missing element 's', etc.
 
        let [mut s_not_o, mut o_not_s] = [vec![], vec![]];
 
        loop {
 
            match [s, o] {
 
                [None, None] => break, // both iterators are empty
 
                [None, Some(x)] => {
 
                    // self's iterator is empty.
 
                    // all remaining elements are in other but not self
 
                    o_not_s.push(x);
 
                    o_not_s.extend(o_it);
 
                    break;
 
                }
 
                [Some(x), None] => {
 
                    // other's iterator is empty.
 
                    // all remaining elements are in self but not other
 
                    s_not_o.push(x);
 
                    s_not_o.extend(s_it);
 
                    break;
 
                }
 
                [Some((sid, sb)), Some((oid, ob))] => {
 
                    if sid < oid {
 
                        // o is missing this element
 
                        s_not_o.push((sid, sb));
 
                        s = s_it.next();
 
                    } else if sid > oid {
 
                        // s is missing this element
 
                        o_not_s.push((oid, ob));
 
                        o = o_it.next();
 
                    } else if sb != ob {
 
                        assert_eq!(sid, oid);
 
                        // both predicates assign the variable but differ on the value
 
                        // No predicate exists which satisfies both!
 
                        return Aur::Nonexistant;
 
                    } else {
 
                        // both predicates assign the variable to the same value
 
                        s = s_it.next();
 
                        o = o_it.next();
 
                    }
 
                }
 
            }
 
        }
 
        // Observed zero inconsistencies. A unified predicate exists...
 
        match [s_not_o.is_empty(), o_not_s.is_empty()] {
 
            [true, true] => Aur::Equivalent,       // ... equivalent to both.
 
            [false, true] => Aur::FormerNotLatter, // ... equivalent to self.
 
            [true, false] => Aur::LatterNotFormer, // ... equivalent to other.
 
            [false, false] => {
 
                // ... which is the union of the predicates' assignments but
 
                //     is equivalent to neither self nor other.
 
                let mut new = self.clone();
 
                for (&id, &b) in o_not_s {
 
                    new.assigned.insert(id, b);
 
                }
 
                Aur::New(new)
 
            }
 
    /// Directly retrieves an entry. There be dragons here. The `connector`
 
    /// might have its destructor already executed. Accessing it might then lead
 
    /// to memory corruption.
 
    fn get(&self, index: u32) -> &'static mut StoreEntry {
 
        unsafe {
 
            let entry = self.entries.get_mut(index as usize);
 
            return &mut **entry;
 
        }
 
    }
 

	
 
    // Compute the union of the assignments of the two given predicates, if it exists.
 
    // It doesn't exist if there is some variable which the predicates assign to different values.
 
    pub(crate) fn union_with(&self, other: &Self) -> Option<Self> {
 
        let mut res = self.clone();
 
        for (&channel_id, &assignment_1) in other.assigned.iter() {
 
            match res.assigned.insert(channel_id, assignment_1) {
 
                Some(assignment_2) if assignment_1 != assignment_2 => return None,
 
                _ => {}
 
    /// Creates a new connector. Caller should ensure ports are set up correctly
 
    /// and the connector is queued for execution if needed.
 
    fn create(&mut self, connector: ConnectorVariant, initially_sleeping: bool) -> ConnectorKey {
 
        let mut connector = ScheduledConnector {
 
            connector,
 
            ctx: ComponentCtx::new_empty(),
 
            public: ConnectorPublic::new(initially_sleeping),
 
            router: ControlMessageHandler::new(),
 
            shutting_down: false,
 
        };
 

	
 
        let index;
 
        let key;
 

	
 
        if self.free.is_empty() {
 
            // No free entries, allocate new entry
 
            index = self.entries.len();
 
            key = ConnectorKey{
 
                index: index as u32, generation: 0
 
            };
 
            connector.ctx.id = key.downcast();
 

	
 
            let connector = Box::into_raw(Box::new(StoreEntry{
 
                connector,
 
                generation: AtomicU32::new(0),
 
                num_users: AtomicU32::new(1),
 
            }));
 
            self.entries.push(connector);
 
        } else {
 
            // Free spot available
 
            index = self.free.pop().unwrap();
 

	
 
            unsafe {
 
                let target = &mut **self.entries.get_mut(index);
 
                std::ptr::write(&mut target.connector as *mut _, connector);
 
                let _old_num_users = target.num_users.fetch_add(1, Ordering::SeqCst);
 
                debug_assert_eq!(_old_num_users, 0);
 

	
 
                let generation = target.generation.load(Ordering::Acquire);
 
                key = ConnectorKey{ index: index as u32, generation };
 
                target.connector.ctx.id = key.downcast();
 
            }
 
        }
 
        Some(res)
 
    }
 
    pub(crate) fn query(&self, var: SpecVar) -> Option<SpecVal> {
 
        self.assigned.get(&var).copied()
 
    }
 
}
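
// A standalone sketch of the predicate semantics above, with assignments modelled as
// `BTreeMap<u32, bool>` (an assumed stand-in for SpecVar -> SpecVal): a union exists
// only when the two predicates agree on every shared variable, and "subset" means every
// assignment of the smaller predicate is mirrored by the larger one.
use std::collections::BTreeMap;

fn sketch_union_with(a: &BTreeMap<u32, bool>, b: &BTreeMap<u32, bool>) -> Option<BTreeMap<u32, bool>> {
    let mut result = a.clone();
    for (&var, &val) in b {
        match result.insert(var, val) {
            Some(prev) if prev != val => return None, // conflicting assignment: no union
            _ => {}
        }
    }
    Some(result)
}

fn sketch_assigns_subset(sub: &BTreeMap<u32, bool>, sup: &BTreeMap<u32, bool>) -> bool {
    sub.iter().all(|(var, val)| sup.get(var) == Some(val))
}

fn sketch_predicate_examples() {
    let p = BTreeMap::from([(0u32, true)]);
    let q = BTreeMap::from([(0u32, true), (1u32, false)]);
    let r = BTreeMap::from([(0u32, false)]);
    assert!(sketch_assigns_subset(&p, &q)); // p is a subset of q
    assert_eq!(sketch_union_with(&p, &q), Some(q.clone())); // union equivalent to the larger predicate
    assert_eq!(sketch_union_with(&p, &r), None); // variable 0 assigned differently: no union exists
}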
 

	
 
impl RoundCtx {
 
    // remove an arbitrary buffered message, along with the ID of the getter who receives it
 
    fn getter_pop(&mut self) -> Option<(PortId, SendPayloadMsg)> {
 
        self.payload_inbox.pop()
 
        println!("DEBUG [ global store  ] Created component at {}", key.index);
 
        return key;
 
    }
 

	
 
    // buffer a message along with the ID of the getter who receives it
 
    fn getter_push(&mut self, getter: PortId, msg: SendPayloadMsg) {
 
        self.payload_inbox.push((getter, msg));
 
    }
 

	
 
    // buffer a message along with the ID of the putter who sent it
 
    fn putter_push(&mut self, cu: &mut impl CuUndecided, putter: PortId, msg: SendPayloadMsg) {
 
        if let Some(getter) = self.ips.port_info.map.get(&putter).unwrap().peer {
 
            log!(cu.logger(), "Putter add (putter:{:?} => getter:{:?})", putter, getter);
 
            self.getter_push(getter, msg);
 
        } else {
 
            log!(cu.logger(), "Putter {:?} has no known peer!", putter);
 
            panic!("Putter {:?} has no known peer!", putter);
 
    /// Destroys a connector. Caller should make sure it is not scheduled for
 
    /// execution. Otherwise one experiences "bad stuff" (tm).
 
    fn destroy(&mut self, key: ConnectorKey) {
 
        unsafe {
 
            let target = self.entries.get_mut(key.index as usize);
 
            (**target).generation.fetch_add(1, Ordering::SeqCst);
 
            std::ptr::drop_in_place(*target);
 
            // Note: but not deallocating!
 
        }
 
    }
 
}
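
// A self-contained sketch of the generation-counter idea used by ConnectorStore, omitting
// the heap-allocated, pointer-stable entries and the atomic num_users bookkeeping of the
// real store: each slot's generation is bumped on destruction, so a stale key
// (index + generation) is detected instead of silently aliasing a reused slot.
struct SketchSlot<T> { generation: u32, value: Option<T> }

struct SketchStore<T> { slots: Vec<SketchSlot<T>>, free: Vec<usize> }

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct SketchKey { index: usize, generation: u32 }

impl<T> SketchStore<T> {
    fn new() -> Self { Self { slots: Vec::new(), free: Vec::new() } }

    fn create(&mut self, value: T) -> SketchKey {
        if let Some(index) = self.free.pop() {
            let slot = &mut self.slots[index];
            slot.value = Some(value);
            SketchKey { index, generation: slot.generation }
        } else {
            self.slots.push(SketchSlot { generation: 0, value: Some(value) });
            SketchKey { index: self.slots.len() - 1, generation: 0 }
        }
    }

    fn get(&self, key: SketchKey) -> Option<&T> {
        let slot = &self.slots[key.index];
        if slot.generation == key.generation { slot.value.as_ref() } else { None }
    }

    fn destroy(&mut self, key: SketchKey) {
        let slot = &mut self.slots[key.index];
        slot.generation += 1; // invalidate every outstanding key for this slot
        slot.value = None;
        self.free.push(key.index);
    }
}

fn sketch_store_example() {
    let mut store = SketchStore::new();
    let key = store.create("component");
    assert_eq!(store.get(key), Some(&"component"));
    store.destroy(key);
    assert_eq!(store.get(key), None); // stale key rejected even if the slot is reused later
}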
 

	
 
impl<T: Debug + std::cmp::Ord> Debug for VecSet<T> {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        f.debug_set().entries(self.vec.iter()).finish()
 
        println!("DEBUG [ global store  ] Destroyed component at {}", key.index);
 
        self.free.push(key.index as usize);
 
    }
 
}
 
impl Debug for Predicate {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        struct Assignment<'a>((&'a SpecVar, &'a SpecVal));
 
        impl Debug for Assignment<'_> {
 
            fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
                write!(f, "{:?}={:?}", (self.0).0, (self.0).1)
 

	
 
impl Drop for ConnectorStore {
 
    fn drop(&mut self) {
 
        // Everything in the freelist already had its destructor called, so it only
 
        // has to be deallocated
 
        for free_idx in self.free.iter().copied() {
 
            unsafe {
 
                let memory = self.entries.get_mut(free_idx);
 
                let layout = std::alloc::Layout::for_value(&**memory);
 
                std::alloc::dealloc(*memory as *mut u8, layout);
 

	
 
                // mark as null for the remainder
 
                *memory = std::ptr::null_mut();
 
            }
 
        }
 
        f.debug_set().entries(self.assigned.iter().map(Assignment)).finish()
 
    }
 
}
 
impl IdParts for SpecVar {
 
    fn id_parts(self) -> (ConnectorId, U32Suffix) {
 
        self.0.id_parts()
 
    }
 
}
 
impl Debug for SpecVar {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        let (a, b) = self.id_parts();
 
        write!(f, "v{}_{}", a, b)
 
    }
 
}
 
impl SpecVal {
 
    const FIRING: Self = SpecVal(1);
 
    const SILENT: Self = SpecVal(0);
 
    fn is_firing(self) -> bool {
 
        self == Self::FIRING
 
        // all else treated as SILENT
 
    }
 
    fn iter_domain() -> impl Iterator<Item = Self> {
 
        (0..).map(SpecVal)
 
    }
 
}
 
impl Debug for SpecVal {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        self.0.fmt(f)
 
    }
 
}
 
impl Default for IoByteBuffer {
 
    fn default() -> Self {
 
        let mut byte_vec = Vec::with_capacity(Self::CAPACITY);
 
        unsafe {
 
            // safe! this vector is guaranteed to have sufficient capacity
 
            byte_vec.set_len(Self::CAPACITY);
 
        }
 
        Self { byte_vec }
 
    }
 
}
 
impl IoByteBuffer {
 
    const CAPACITY: usize = u16::MAX as usize + 1000;
 
    fn as_mut_slice(&mut self) -> &mut [u8] {
 
        self.byte_vec.as_mut_slice()
 
    }
 
}
 

	
 
impl Debug for IoByteBuffer {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        write!(f, "IoByteBuffer")
 
        // With the deallocated stuff marked as null, clear the remainder that
 
        // is not null
 
        for idx in 0..self.entries.len() {
 
            unsafe {
 
                let memory = *self.entries.get_mut(idx);
 
                if !memory.is_null() {
 
                    let _ = Box::from_raw(memory); // take care of deallocation, bit dirty, but meh
 
                }
 
            }
 
        }
 
    }
 
}
 
}
 
\ No newline at end of file
src/runtime/native.rs
Show inline comments
 
file renamed from src/runtime2/native.rs to src/runtime/native.rs
 
use std::collections::VecDeque;
 
use std::sync::{Arc, Mutex, Condvar};
 

	
 
use crate::protocol::ComponentCreationError;
 
use crate::protocol::eval::ValueGroup;
 
use crate::runtime2::consensus::RoundConclusion;
 
use crate::runtime::consensus::RoundConclusion;
 

	
 
use super::{ConnectorId, RuntimeInner};
 
use super::branch::{BranchId, FakeTree, QueueKind, SpeculativeState};
 
use super::scheduler::{SchedulerCtx, ComponentCtx, MessageTicket};
 
use super::port::{Port, PortIdLocal, Channel, PortKind};
 
use super::consensus::{Consensus, Consistency, find_ports_in_value_group};
 
use super::connector::{ConnectorScheduling, ConnectorPDL};
 
use super::inbox::{
 
    Message, DataMessage,
 
    SyncCompMessage, SyncPortMessage,
 
    ControlContent, ControlMessage
 
};
 

	
 
/// Generic connector interface from the scheduler's point of view.
 
pub(crate) trait Connector {
 
    /// Should run the connector's behaviour up until the next blocking point.
 
    /// One should generally request and handle new messages from the component
 
    /// context. Then perform any logic the component has to do, and in the
 
    /// process perhaps queue up some state changes using the same context.
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling;
 
}
 

	
 
pub(crate) struct FinishedSync {
 
    // In the order of the `get` calls
 
    success: bool,
 
    inbox: Vec<ValueGroup>,
 
}
 

	
 
type SyncDone = Arc<(Mutex<Option<FinishedSync>>, Condvar)>;
 
type JobQueue = Arc<Mutex<VecDeque<ApplicationJob>>>;
 

	
 
enum ApplicationJob {
 
    NewChannel((Port, Port)),
 
    NewConnector(ConnectorPDL, Vec<PortIdLocal>),
 
    SyncRound(Vec<ApplicationSyncAction>),
 
    Shutdown,
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// ConnectorApplication
 
// -----------------------------------------------------------------------------
 

	
 
/// The connector which an application can directly interface with. One may set
 
/// up the next synchronous round, and retrieve the data afterwards.
 
// TODO: Strong candidate for logic reduction in handling put/get. A lot of code
 
//  is an approximate copy-pasta from the regular component logic. I'm going to
 
//  wait until I'm implementing more native components to see which logic is
 
//  truly common.
 
pub struct ConnectorApplication {
 
    // Communicating about new jobs and setting up sync rounds
 
    sync_done: SyncDone,
 
    job_queue: JobQueue,
 
    is_in_sync: bool,
 
    // Handling current sync round
 
    sync_desc: Vec<ApplicationSyncAction>,
 
    tree: FakeTree,
 
    consensus: Consensus,
 
    last_finished_handled: Option<BranchId>,
 
    branch_extra: Vec<usize>, // instruction counter per branch
 
}
 

	
 
impl Connector for ConnectorApplication {
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        if self.is_in_sync {
 
            let scheduling = self.run_in_sync_mode(sched_ctx, comp_ctx);
 
            let mut iter_id = self.last_finished_handled.or(self.tree.get_queue_first(QueueKind::FinishedSync));
 
            while let Some(branch_id) = iter_id {
 
                iter_id = self.tree.get_queue_next(branch_id);
 
                self.last_finished_handled = Some(branch_id);
 

	
 
                if let Some(conclusion) = self.consensus.handle_new_finished_sync_branch(branch_id, comp_ctx) {
 
                    // Can finish sync round immediately
 
                    self.collapse_sync_to_conclusion(conclusion, comp_ctx);
 
                    return ConnectorScheduling::Immediate;
 
                }
 
            }
 

	
 
            return scheduling;
 
        } else {
 
            return self.run_in_deterministic_mode(sched_ctx, comp_ctx);
 
        }
 
    }
 
}
 

	
 
impl ConnectorApplication {
 
    pub(crate) fn new(runtime: Arc<RuntimeInner>) -> (Self, ApplicationInterface) {
 
        let sync_done = Arc::new(( Mutex::new(None), Condvar::new() ));
 
        let job_queue = Arc::new(Mutex::new(VecDeque::with_capacity(32)));
 

	
 
        let connector = ConnectorApplication {
 
            sync_done: sync_done.clone(),
 
            job_queue: job_queue.clone(),
 
            is_in_sync: false,
 
            sync_desc: Vec::new(),
 
            tree: FakeTree::new(),
 
            consensus: Consensus::new(),
 
            last_finished_handled: None,
 
            branch_extra: vec![0],
 
        };
 
        let interface = ApplicationInterface::new(sync_done, job_queue, runtime);
 

	
 
        return (connector, interface);
 
    }
 

	
 
    fn handle_new_messages(&mut self, comp_ctx: &mut ComponentCtx) {
 
        while let Some(ticket) = comp_ctx.get_next_message_ticket() {
 
            let message = comp_ctx.read_message_using_ticket(ticket);
 
            if let Message::Data(_) = message {
 
                self.handle_new_data_message(ticket, comp_ctx)
 
            } else {
 
                match comp_ctx.take_message_using_ticket(ticket) {
 
                    Message::Data(message) => unreachable!(),
 
                    Message::SyncComp(message) => self.handle_new_sync_comp_message(message, comp_ctx),
 
                    Message::SyncPort(message) => self.handle_new_sync_port_message(message, comp_ctx),
 
                    Message::SyncControl(message) => todo!("implement"),
 
                    Message::Control(_) => unreachable!("control message in native API component"),
 
                }
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_new_data_message(&mut self, ticket: MessageTicket, ctx: &mut ComponentCtx) {
 
        // Go through all branches that are awaiting new messages and see if
 
        // there is one that can receive this message.
 
        if !self.consensus.handle_new_data_message(ticket, ctx) {
 
            // Old message, so drop it
 
            return;
 
        }
 

	
 
        let mut iter_id = self.tree.get_queue_first(QueueKind::AwaitingMessage);
 
        while let Some(branch_id) = iter_id {
 
            let message = ctx.read_message_using_ticket(ticket).as_data();
 
            iter_id = self.tree.get_queue_next(branch_id);
 

	
 
            let branch = &self.tree[branch_id];
 
            if branch.awaiting_port != message.data_header.target_port { continue; }
 
            if !self.consensus.branch_can_receive(branch_id, &message) { continue; }
 

	
 
            // This branch can receive, so fork and given it the message
 
            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
            debug_assert!(receiving_branch_id.index as usize == self.branch_extra.len());
 
            self.branch_extra.push(self.branch_extra[branch_id.index as usize]); // copy instruction index
 
            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
            let receiving_branch = &mut self.tree[receiving_branch_id];
 

	
 
            receiving_branch.insert_message(message.data_header.target_port, message.content.clone());
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message, ctx);
 

	
 
            // And prepare the branch for running
 
            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 
        }
 
    }
 

	
 
    pub(crate) fn handle_new_sync_comp_message(&mut self, message: SyncCompMessage, ctx: &mut ComponentCtx) {
 
        if let Some(conclusion) = self.consensus.handle_new_sync_comp_message(message, ctx) {
 
            self.collapse_sync_to_conclusion(conclusion, ctx);
 
        }
 
    }
 

	
 
    pub(crate) fn handle_new_sync_port_message(&mut self, message: SyncPortMessage, ctx: &mut ComponentCtx) {
 
        self.consensus.handle_new_sync_port_message(message, ctx);
 
    }
 

	
 
    fn run_in_sync_mode(&mut self, _sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(self.is_in_sync);
 

	
 
        self.handle_new_messages(comp_ctx);
 

	
 
        let branch_id = self.tree.pop_from_queue(QueueKind::Runnable);
 
        if branch_id.is_none() {
 
            return ConnectorScheduling::NotNow;
 
        }
 

	
 
        let branch_id = branch_id.unwrap();
 
        let branch = &mut self.tree[branch_id];
 
        let mut instruction_idx = self.branch_extra[branch_id.index as usize];
 

	
 
        if instruction_idx >= self.sync_desc.len() {
 
            // Performed last instruction, so this branch is officially at the
 
            // end of the synchronous interaction.
 
            let consistency = self.consensus.notify_of_finished_branch(branch_id);
 
            if consistency == Consistency::Valid {
 
                branch.sync_state = SpeculativeState::ReachedSyncEnd;
 
                self.tree.push_into_queue(QueueKind::FinishedSync, branch_id);
 
            } else {
 
                branch.sync_state = SpeculativeState::Inconsistent;
 
            }
 
        } else {
 
            // We still have instructions to perform
 
            let cur_instruction = &self.sync_desc[instruction_idx];
 
            self.branch_extra[branch_id.index as usize] += 1;
 

	
 
            match &cur_instruction {
 
                ApplicationSyncAction::Put(port_id, content) => {
 
                    let port_id = *port_id;
 

	
 
                    let (sync_header, data_header) = self.consensus.handle_message_to_send(branch_id, port_id, &content, comp_ctx);
 
                    let message = Message::Data(DataMessage {
 
                        sync_header,
 
                        data_header,
 
                        content: content.clone(),
 
                    });
 
                    comp_ctx.submit_message(message);
 
                    self.tree.push_into_queue(QueueKind::Runnable, branch_id);
 
                    return ConnectorScheduling::Immediate;
 
                },
 
                ApplicationSyncAction::Get(port_id) => {
 
                    let port_id = *port_id;
 

	
 
                    branch.sync_state = SpeculativeState::HaltedAtBranchPoint;
 
                    branch.awaiting_port = port_id;
 
                    self.tree.push_into_queue(QueueKind::AwaitingMessage, branch_id);
 

	
 
                    let mut any_message_received = false;
 
                    for message in comp_ctx.get_read_data_messages(port_id) {
 
                        if self.consensus.branch_can_receive(branch_id, &message) {
 
                            // This branch can receive the message, so we do the
 
                            // fork-and-receive dance
 
                            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
                            let branch = &mut self.tree[receiving_branch_id];
 
                            debug_assert!(receiving_branch_id.index as usize == self.branch_extra.len());
 
                            self.branch_extra.push(instruction_idx + 1);
 

	
 
                            branch.insert_message(port_id, message.content.clone());
 

	
 
                            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
                            self.consensus.notify_of_received_message(receiving_branch_id, &message, comp_ctx);
 
                            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 

	
 
                            any_message_received = true;
 
                        }
 
                    }
 

	
 
                    if any_message_received {
 
                        return ConnectorScheduling::Immediate;
 
                    }
 
                }
 
            }
 
        }
 

	
 
        if self.tree.queue_is_empty(QueueKind::Runnable) {
 
            return ConnectorScheduling::NotNow;
 
        } else {
 
            return ConnectorScheduling::Later;
 
        }
 
    }
 

	
 
    fn run_in_deterministic_mode(&mut self, _sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(!self.is_in_sync);
 

	
 
        // In non-sync mode the application component doesn't really do anything
 
        // except performing jobs submitted from the API. This is the only
 
        // case where we expect to be woken up.
 
        // Note that we have to communicate to the scheduler when we've received
 
        // ports or created components (hence: given away ports) *before* we
 
        // enter a sync round.
 
        let mut queue = self.job_queue.lock().unwrap();
 
        while let Some(job) = queue.pop_front() {
 
            match job {
 
                ApplicationJob::NewChannel((endpoint_a, endpoint_b)) => {
 
                    comp_ctx.push_port(endpoint_a);
 
                    comp_ctx.push_port(endpoint_b);
 

	
 
                    return ConnectorScheduling::Immediate;
 
                }
 
                ApplicationJob::NewConnector(connector, initial_ports) => {
 
                    comp_ctx.push_component(connector, initial_ports);
 

	
 
                    return ConnectorScheduling::Later;
 
                },
 
                ApplicationJob::SyncRound(mut description) => {
 
                    // Entering sync mode
 
                    comp_ctx.notify_sync_start();
 
                    self.sync_desc = description;
 
                    self.is_in_sync = true;
 
                    debug_assert!(self.last_finished_handled.is_none());
 
                    debug_assert!(self.branch_extra.len() == 1);
 

	
 
                    let first_branch_id = self.tree.start_sync();
 
                    self.tree.push_into_queue(QueueKind::Runnable, first_branch_id);
 
                    debug_assert!(first_branch_id.index == 1);
 
                    self.consensus.start_sync(comp_ctx);
 
                    self.consensus.notify_of_new_branch(BranchId::new_invalid(), first_branch_id);
 
                    self.branch_extra.push(0); // set first branch to first instruction
 

	
 
                    return ConnectorScheduling::Immediate;
 
                },
 
                ApplicationJob::Shutdown => {
 
                    debug_assert!(queue.is_empty());
 

	
 
                    return ConnectorScheduling::Exit;
 
                }
 
            }
 
        }
 

	
 
        // Queue was empty
 
        return ConnectorScheduling::NotNow;
 
    }
 

	
 
    fn collapse_sync_to_conclusion(&mut self, conclusion: RoundConclusion, comp_ctx: &mut ComponentCtx) {
 
        // Notifying tree, consensus algorithm and context of ending sync
 
        let mut fake_vec = Vec::new();
 

	
 
        let (branch_id, success) = match conclusion {
 
            RoundConclusion::Success(branch_id) => {
 
                debug_assert!(self.branch_extra[branch_id.index as usize] >= self.sync_desc.len()); // finished program provided by API
 
                (branch_id, true)
 
            },
 
            RoundConclusion::Failure => (BranchId::new_invalid(), false),
 
        };
 

	
 
        let mut solution_branch = self.tree.end_sync(branch_id);
 
        self.consensus.end_sync(branch_id, &mut fake_vec);
 
        debug_assert!(fake_vec.is_empty());
 

	
 
        comp_ctx.notify_sync_end(&[]);
 

	
 
        // Turning hashmapped inbox into vector of values
 
        let mut inbox = Vec::with_capacity(solution_branch.inbox.len());
 
        for action in &self.sync_desc {
 
            match action {
 
                ApplicationSyncAction::Put(_, _) => {},
 
                ApplicationSyncAction::Get(port_id) => {
 
                    debug_assert!(solution_branch.inbox.contains_key(port_id));
 
                    inbox.push(solution_branch.inbox.remove(port_id).unwrap());
 
                },
 
            }
 
        }
 

	
 
        // Notifying interface of ending sync
 
        self.is_in_sync = false;
 
        self.sync_desc.clear();
 
        self.branch_extra.truncate(1);
 
        self.last_finished_handled = None;
 

	
 
        let (results, notification) = &*self.sync_done;
 
        let mut results = results.lock().unwrap();
 
        *results = Some(FinishedSync{ success, inbox });
 
        notification.notify_one();
 
    }
 
}
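
// A standalone sketch of the Mutex + Condvar handshake behind `sync_done`: the component
// side publishes the round's outcome under the lock and notifies, while the interface
// side blocks in `wait_while` until an outcome is present. The real code stores a
// FinishedSync; a bool stands in for it here.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn sketch_sync_done_handshake() {
    let sync_done: Arc<(Mutex<Option<bool>>, Condvar)> =
        Arc::new((Mutex::new(None), Condvar::new()));

    // "component" side: finish the round and publish the outcome
    let publisher = Arc::clone(&sync_done);
    let component = thread::spawn(move || {
        let (result, notification) = &*publisher;
        *result.lock().unwrap() = Some(true);
        notification.notify_one();
    });

    // "interface" side: block until an outcome is available, then take it
    let (result, condition) = &*sync_done;
    let mut guard = result.lock().unwrap();
    guard = condition.wait_while(guard, |outcome| outcome.is_none()).unwrap();
    assert_eq!(guard.take(), Some(true));
    drop(guard);

    component.join().unwrap();
}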
 

	
 
// -----------------------------------------------------------------------------
 
// ApplicationInterface
 
// -----------------------------------------------------------------------------
 

	
 
#[derive(Debug)]
 
pub enum ChannelCreationError {
 
    InSync,
 
}
 

	
 
#[derive(Debug)]
 
pub enum ApplicationStartSyncError {
 
    AlreadyInSync,
 
    NoSyncActions,
 
    IncorrectPortKind,
 
    UnownedPort,
 
}
 

	
 
#[derive(Debug)]
 
pub enum ApplicationEndSyncError {
 
    NotInSync,
 
    Failure,
 
}
 

	
 
pub enum ApplicationSyncAction {
 
    Put(PortIdLocal, ValueGroup),
 
    Get(PortIdLocal),
 
}
 

	
 
/// The interface to an `ApplicationConnector`. This allows setting up the
 
/// interactions the `ApplicationConnector` performs within a synchronous round.
 
pub struct ApplicationInterface {
 
    sync_done: SyncDone,
 
    job_queue: JobQueue,
 
    runtime: Arc<RuntimeInner>,
 
    is_in_sync: bool,
 
    connector_id: ConnectorId,
 
    owned_ports: Vec<(PortKind, PortIdLocal)>,
 
}
 

	
 
impl ApplicationInterface {
 
    fn new(sync_done: SyncDone, job_queue: JobQueue, runtime: Arc<RuntimeInner>) -> Self {
 
        return Self{
 
            sync_done, job_queue, runtime,
 
            is_in_sync: false,
 
            connector_id: ConnectorId::new_invalid(),
 
            owned_ports: Vec::new(),
 
        }
 
    }
 

	
 
    /// Creates a new channel. Can only fail if the application interface is
 
    /// currently in sync mode.
 
    pub fn create_channel(&mut self) -> Result<Channel, ChannelCreationError> {
 
        if self.is_in_sync {
 
            return Err(ChannelCreationError::InSync);
 
        }
 

	
 
        let (getter_port, putter_port) = self.runtime.create_channel(self.connector_id);
 
        debug_assert_eq!(getter_port.kind, PortKind::Getter);
 
        let getter_id = getter_port.self_id;
 
        let putter_id = putter_port.self_id;
 

	
 
        {
 
            let mut lock = self.job_queue.lock().unwrap();
 
            lock.push_back(ApplicationJob::NewChannel((getter_port, putter_port)));
 
        }
 

	
 
        // Add to owned ports for error checking while creating a connector
 
        self.owned_ports.reserve(2);
 
        self.owned_ports.push((PortKind::Putter, putter_id));
 
        self.owned_ports.push((PortKind::Getter, getter_id));
 

	
 
        return Ok(Channel{ putter_id, getter_id });
 
    }
 

	
 
    /// Creates a new connector. Note that it is not scheduled immediately, but
 
    /// depends on the `ApplicationConnector` to run, followed by the created
 
    /// connector being scheduled.
 
    pub fn create_connector(&mut self, module: &str, routine: &str, arguments: ValueGroup) -> Result<(), ComponentCreationError> {
 
        if self.is_in_sync {
 
            return Err(ComponentCreationError::InSync);
 
        }
 

	
 
        // Retrieve ports and make sure that we own the ones that are currently
 
        // specified. This is also checked by the scheduler, but that is done
 
        // asynchronously.
 
        let mut initial_ports = Vec::new();
 
        find_ports_in_value_group(&arguments, &mut initial_ports);
 
        for initial_port in &initial_ports {
 
            if !self.owned_ports.iter().any(|(_, v)| v == initial_port) {
 
                return Err(ComponentCreationError::UnownedPort);
 
            }
 
        }
 

	
 
        // We own all ports, so remove them on this side
 
        for initial_port in &initial_ports {
 
            let position = self.owned_ports.iter().position(|(_, v)| v == initial_port).unwrap();
 
            self.owned_ports.remove(position);
 
        }
 

	
 
        let prompt = self.runtime.protocol_description.new_component_v2(module.as_bytes(), routine.as_bytes(), arguments)?;
 
        let prompt = self.runtime.protocol_description.new_component(module.as_bytes(), routine.as_bytes(), arguments)?;
 
        let connector = ConnectorPDL::new(prompt);
 

	
 
        // Put on job queue
 
        {
 
            let mut queue = self.job_queue.lock().unwrap();
 
            queue.push_back(ApplicationJob::NewConnector(connector, initial_ports));
 
        }
 

	
 
        self.wake_up_connector_with_ping();
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Queues up a description of a synchronous round to run. Will not actually
 
    /// run the synchronous behaviour in blocking fashion. The results *must* be
 
    /// retrieved using `try_wait` or `wait` for the interface to be considered
 
    /// in non-sync mode.
 
    pub fn perform_sync_round(&mut self, actions: Vec<ApplicationSyncAction>) -> Result<(), ApplicationStartSyncError> {
 
        if self.is_in_sync {
 
            return Err(ApplicationStartSyncError::AlreadyInSync);
 
        }
 

	
 
        // Check the action ports for consistency
 
        for action in &actions {
 
            let (port_id, expected_kind) = match action {
 
                ApplicationSyncAction::Put(port_id, _) => (*port_id, PortKind::Putter),
 
                ApplicationSyncAction::Get(port_id) => (*port_id, PortKind::Getter),
 
            };
 

	
 
            match self.find_port_by_id(port_id) {
 
                Some(port_kind) => {
 
                    if port_kind != expected_kind {
 
                        return Err(ApplicationStartSyncError::IncorrectPortKind)
 
                    }
 
                },
 
                None => {
 
                    return Err(ApplicationStartSyncError::UnownedPort);
 
                }
 
            }
 
        }
 

	
 
        // Everything is consistent, go into sync mode and send the actions off
 
        // to the component that will actually perform the sync round
 
        self.is_in_sync = true;
 
        {
 
            let (is_done, _) = &*self.sync_done;
 
            let mut lock = is_done.lock().unwrap();
 
            *lock = None;
 
        }
 

	
 
        {
 
            let mut lock = self.job_queue.lock().unwrap();
 
            lock.push_back(ApplicationJob::SyncRound(actions));
 
        }
 

	
 
        self.wake_up_connector_with_ping();
 
        return Ok(())
 
    }
 

	
 
    /// Wait until the next sync-round is finished, returning the received
 
    /// messages in order of `get` calls.
 
    pub fn wait(&mut self) -> Result<Vec<ValueGroup>, ApplicationEndSyncError> {
 
        if !self.is_in_sync {
 
            return Err(ApplicationEndSyncError::NotInSync);
 
        }
 

	
 
        let (is_done, condition) = &*self.sync_done;
 
        let mut lock = is_done.lock().unwrap();
 
        lock = condition.wait_while(lock, |v| v.is_none()).unwrap(); // wait while not done
 

	
 
        self.is_in_sync = false;
 
        let result = lock.take().unwrap();
 
        if result.success {
 
            return Ok(result.inbox);
 
        } else {
 
            return Err(ApplicationEndSyncError::Failure);
 
        }
 
    }
 

	
 
    /// Called by runtime to set associated connector's ID.
 
    pub(crate) fn set_connector_id(&mut self, id: ConnectorId) {
 
        self.connector_id = id;
 
    }
 

	
 
    fn wake_up_connector_with_ping(&self) {
 
        let message = ControlMessage {
 
            id: 0,
 
            sending_component_id: self.connector_id,
 
            content: ControlContent::Ping,
 
        };
 
        self.runtime.send_message_maybe_destroyed(self.connector_id, Message::Control(message));
 
    }
 

	
 
    fn find_port_by_id(&self, port_id: PortIdLocal) -> Option<PortKind> {
 
        return self.owned_ports.iter()
 
            .find(|(_, owned_id)| *owned_id == port_id)
 
            .map(|(port_kind, _)| *port_kind);
 
    }
 
}
 

	
 
impl Drop for ApplicationInterface {
 
    fn drop(&mut self) {
 
        {
 
            let mut lock = self.job_queue.lock().unwrap();
 
            lock.push_back(ApplicationJob::Shutdown);
 
        }
 

	
 
        self.wake_up_connector_with_ping();
 
        self.runtime.decrement_active_interfaces();
 
    }
 
}
 
\ No newline at end of file
src/runtime/port.rs
Show inline comments
 
file renamed from src/runtime2/port.rs to src/runtime/port.rs
src/runtime/scheduler.rs
Show inline comments
 
file renamed from src/runtime2/scheduler.rs to src/runtime/scheduler.rs
 
use std::collections::VecDeque;
 
use std::sync::Arc;
 
use std::sync::atomic::Ordering;
 

	
 
use crate::protocol::eval::EvalError;
 
use crate::runtime2::port::ChannelId;
 
use crate::runtime::port::ChannelId;
 

	
 
use super::{ScheduledConnector, RuntimeInner, ConnectorId, ConnectorKey};
 
use super::port::{Port, PortState, PortIdLocal};
 
use super::native::Connector;
 
use super::branch::{BranchId};
 
use super::connector::{ConnectorPDL, ConnectorScheduling};
 
use super::inbox::{
 
    Message, DataMessage,
 
    ControlMessage, ControlContent,
 
    SyncControlMessage, SyncControlContent,
 
};
 

	
 
// Because it contains pointers we're going to do a copy by value on this one
 
#[derive(Clone, Copy)]
 
pub(crate) struct SchedulerCtx<'a> {
 
    pub(crate) runtime: &'a RuntimeInner
 
}
 

	
 
pub(crate) struct Scheduler {
 
    runtime: Arc<RuntimeInner>,
 
    scheduler_id: u32,
 
}
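
// A reduced sketch of the scheduling loop in `run` below, driven by the same four
// outcomes a component run can produce (Immediate, Later, NotNow, Exit), with the
// runtime interactions replaced by stand-in closures; the real loop additionally
// handles inbox messages and the shutting_down state.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum SketchScheduling { Immediate, Later, NotNow, Exit }

fn sketch_schedule_loop(
    mut run_once: impl FnMut() -> SketchScheduling,
    mut requeue: impl FnMut(),
    mut go_to_sleep: impl FnMut(),
    mut begin_shutdown: impl FnMut(),
) {
    // keep running as long as the component asks to be scheduled immediately
    let mut schedule = SketchScheduling::Immediate;
    while schedule == SketchScheduling::Immediate {
        schedule = run_once();
    }
    match schedule {
        SketchScheduling::Immediate => unreachable!(),
        SketchScheduling::Later => requeue(),       // back onto the work queue
        SketchScheduling::NotNow => go_to_sleep(),  // sleep until a message arrives
        SketchScheduling::Exit => begin_shutdown(), // notify peers, then destroy
    }
}

fn sketch_schedule_loop_example() {
    let mut runs = 0;
    let mut slept = false;
    sketch_schedule_loop(
        || { runs += 1; if runs < 3 { SketchScheduling::Immediate } else { SketchScheduling::NotNow } },
        || {},
        || slept = true,
        || {},
    );
    assert_eq!(runs, 3);
    assert!(slept);
}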
 

	
 
impl Scheduler {
 
    pub fn new(runtime: Arc<RuntimeInner>, scheduler_id: u32) -> Self {
 
        return Self{ runtime, scheduler_id };
 
    }
 

	
 
    pub fn run(&mut self) {
 
        // Setup global storage and workspaces that are reused for every
 
        // connector that we run
 
        'thread_loop: loop {
 
            // Retrieve a unit of work
 
            self.debug("Waiting for work");
 
            let connector_key = self.runtime.wait_for_work();
 
            if connector_key.is_none() {
 
                // We should exit
 
                self.debug(" ... No more work, quitting");
 
                break 'thread_loop;
 
            }
 

	
 
            // We have something to do
 
            let connector_key = connector_key.unwrap();
 
            let connector_id = connector_key.downcast();
 
            self.debug_conn(connector_id, &format!(" ... Got work, running {}", connector_key.index));
 

	
 
            let scheduled = self.runtime.get_component_private(&connector_key);
 

	
 
            // Keep running until we should no longer immediately schedule the
 
            // connector.
 
            let mut cur_schedule = ConnectorScheduling::Immediate;
 
            while let ConnectorScheduling::Immediate = cur_schedule {
 
                self.handle_inbox_messages(scheduled);
 

	
 
                // Run the main behaviour of the connector, depending on its
 
                // current state.
 
                if scheduled.shutting_down {
 
                    // Nothing to do. But we're still waiting for all our pending
 
                    // control messages to be answered.
 
                    self.debug_conn(connector_id, &format!("Shutting down, {} Acks remaining", scheduled.router.num_pending_acks()));
 
                    if scheduled.router.num_pending_acks() == 0 {
 
                        // We're actually done, we can safely destroy the
 
                        // currently running connector
 
                        self.runtime.initiate_component_destruction(connector_key);
 
                        continue 'thread_loop;
 
                    } else {
 
                        cur_schedule = ConnectorScheduling::NotNow;
 
                    }
 
                } else {
 
                    self.debug_conn(connector_id, "Running ...");
 
                    let scheduler_ctx = SchedulerCtx{ runtime: &*self.runtime };
 
                    let new_schedule = scheduled.connector.run(scheduler_ctx, &mut scheduled.ctx);
 
                    self.debug_conn(connector_id, &format!("Finished running (new scheduling is {:?})", new_schedule));
 

	
 
                    // Handle all of the output from the current run: messages to
 
                    // send and connectors to instantiate.
 
                    self.handle_changes_in_context(scheduled);
 

	
 
                    cur_schedule = new_schedule;
 
                }
 
            }
 

	
 
            // If here then the connector does not require immediate execution.
 
            // So enqueue it if requested, and otherwise put it in a sleeping
 
            // state.
 
            match cur_schedule {
 
                ConnectorScheduling::Immediate => unreachable!(),
 
                ConnectorScheduling::Later => {
 
                    // Simply queue it again later
 
                    self.runtime.push_work(connector_key);
 
                },
 
                ConnectorScheduling::NotNow => {
 
                    // Need to sleep, note that we are the only ones which are
 
                    // allowed to set the sleeping state to `true`, and since
 
                    // we're running it must currently be `false`.
 
                    self.try_go_to_sleep(connector_key, scheduled);
 
                },
 
                ConnectorScheduling::Exit => {
 
                    // Prepare for exit. Set the shutdown flag and broadcast
 
                    // messages to notify peers of closing channels
 
                    scheduled.shutting_down = true;
 
                    for port in &scheduled.ctx.ports {
 
                        if port.state != PortState::Closed {
 
                            let message = scheduled.router.prepare_closing_channel(
 
                                port.self_id, port.peer_id,
 
                                connector_id
 
                            );
 
                            self.debug_conn(connector_id, &format!("Sending message to {:?} [ exit ] \n --- {:?}", port.peer_connector, message));
 
                            self.runtime.send_message_assumed_alive(port.peer_connector, Message::Control(message));
 
                        }
 
                    }
 

	
 
                    // Any messages still in the (private) inbox should be handled
 
                    scheduled.ctx.inbox.clear_read_messages();
 
                    while let Some(ticket) = scheduled.ctx.get_next_message_ticket_even_if_not_in_sync() {
 
                        let message = scheduled.ctx.take_message_using_ticket(ticket);
 
                        self.handle_message_while_shutting_down(message, scheduled);
 
                    }
 

	
 
                    if scheduled.router.num_pending_acks() == 0 {
 
                        // All ports (if any) already closed
 
                        self.runtime.initiate_component_destruction(connector_key);
 
                        continue 'thread_loop;
 
                    }
 

	
 
                    self.try_go_to_sleep(connector_key, scheduled);
 
                },
 
            }
 
        }
 
    }
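    // In short, each iteration of the loop above: (1) pops a connector key from
    // the runtime's work queue, (2) drains that connector's public inbox before
    // every run, (3) runs the connector for as long as it requests `Immediate`
    // scheduling, and (4) finally requeues it (`Later`), puts it to sleep
    // (`NotNow`), or starts the shutdown/Ack sequence (`Exit`).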
 

	
 
    /// Receiving messages from the public inbox and handling them or storing
 
    /// them in the component's private inbox
 
    fn handle_inbox_messages(&mut self, scheduled: &mut ScheduledConnector) {
 
        let connector_id = scheduled.ctx.id;
 

	
 
        while let Some(message) = scheduled.public.inbox.take_message() {
 
            // Check if the message has to be rerouted because we have moved the
 
            // target port to another component.
 
            self.debug_conn(connector_id, &format!("Handling message\n --- {:#?}", message));
 
            if let Some(target_port) = message.target_port() {
 
                if let Some(other_component_id) = scheduled.router.should_reroute(target_port) {
 
                    self.debug_conn(connector_id, " ... Rerouting the message");
 

	
 
                    // We insert directly into the private inbox. Since we have
 
                    // a reroute entry the component can not yet be running.
 
                    if let Message::Control(_) = &message {
 
                        self.runtime.send_message_assumed_alive(other_component_id, message);
 
                    } else {
 
                        let key = unsafe { ConnectorKey::from_id(other_component_id) };
 
                        let component = self.runtime.get_component_private(&key);
 
                        component.ctx.inbox.insert_new(message);
 
                    }
 

	
 
                    continue;
 
                }
 

	
 
                match scheduled.ctx.get_port_by_id(target_port) {
 
                    Some(port_info) => {
 
                        if port_info.state == PortState::Closed {
 
                            // We're no longer supposed to receive messages
 
                            // (rerouted message arrived much later!)
 
                            continue
 
                        }
 
                    },
 
                    None => {
 
                        // Apparently we no longer have a handle to the port
 
                        continue;
 
                    }
 
                }
 
            }
 

	
 
            // If here, then we should handle the message
 
            self.debug_conn(connector_id, " ... Handling the message");
 
            if let Message::Control(message) = &message {
 
                match message.content {
 
                    ControlContent::PortPeerChanged(port_id, new_target_connector_id) => {
 
                        // Need to change port target
 
                        let port = scheduled.ctx.get_port_mut_by_id(port_id).unwrap();
 
                        port.peer_connector = new_target_connector_id;
 

	
 
                        // Note: for simplicity we program the scheduler to always finish
 
                        // running a connector with an empty outbox. If this ever changes
 
                        // then accepting the "port peer changed" message implies we need
 
                        // to change the recipient of the message in the outbox.
 
                        debug_assert!(scheduled.ctx.outbox.is_empty());
 

	
 
                        // And respond with an Ack
 
                        let ack_message = Message::Control(ControlMessage {
 
                            id: message.id,
 
                            sending_component_id: connector_id,
 
                            content: ControlContent::Ack,
 
                        });
 
                        self.debug_conn(connector_id, &format!("Sending message to {:?} [pp ack]\n --- {:?}", message.sending_component_id, ack_message));
 
                        self.runtime.send_message_assumed_alive(message.sending_component_id, ack_message);
 
                    },
 
                    ControlContent::CloseChannel(port_id) => {
 
                        // Mark the port as being closed
 
                        let port = scheduled.ctx.get_port_mut_by_id(port_id).unwrap();
 
                        port.state = PortState::Closed;
 

	
 
                        // Send an Ack
 
                        let ack_message = Message::Control(ControlMessage {
 
                            id: message.id,
 
                            sending_component_id: connector_id,
 
                            content: ControlContent::Ack,
 
                        });
 
                        self.debug_conn(connector_id, &format!("Sending message to {:?} [cc ack] \n --- {:?}", message.sending_component_id, ack_message));
 
                        self.runtime.send_message_assumed_alive(message.sending_component_id, ack_message);
 
                    },
 
                    ControlContent::Ack => {
 
                        if let Some(component_key) = scheduled.router.handle_ack(message.id) {
 
                            self.runtime.push_work(component_key);
 
                        };
 
                    },
 
                    ControlContent::Ping => {},
 
                }
 
            } else {
 
                // Not a control message
 
                if scheduled.shutting_down {
 
                    // Since we're shutting down, we just want to respond with a
 
                    // message saying the message did not arrive.
 
                    debug_assert!(scheduled.ctx.inbox.get_next_message_ticket().is_none()); // public inbox should be completely cleared
 
                    self.handle_message_while_shutting_down(message, scheduled);
 
                } else {
 
                    scheduled.ctx.inbox.insert_new(message);
 
                }
 
            }
 
        }
 
    }
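    // Summary of the routine above: every message in the public inbox is either
    // (1) forwarded when a reroute entry says its target port moved to another
    // component, (2) dropped when the target port is closed or no longer owned,
    // (3) handled right away if it is a control message (PortPeerChanged,
    // CloseChannel, Ack, Ping), or (4) stored in the private inbox; while
    // shutting down it is instead answered with a "channel is closed" reply.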
 

	
 
    fn handle_message_while_shutting_down(&mut self, message: Message, scheduled: &mut ScheduledConnector) {
 
        let target_port_and_round_number = match message {
 
            Message::Data(msg) => Some((msg.data_header.target_port, msg.sync_header.sync_round)),
 
            Message::SyncComp(_) => None,
 
            Message::SyncPort(msg) => Some((msg.target_port, msg.sync_header.sync_round)),
 
            Message::SyncControl(_) => None,
 
            Message::Control(_) => None,
 
        };
 

	
 
        if let Some((target_port, sync_round)) = target_port_and_round_number {
 
            // This message is aimed at a port, but we're shutting down, so
 
            // notify the peer that it was not received properly.
 
            // (also: since we're shutting down, we're not in sync mode and
 
            // the context contains the definitive set of owned ports)
 
            let port = scheduled.ctx.get_port_by_id(target_port).unwrap();
 
            if port.state == PortState::Open {
 
                let message = SyncControlMessage {
 
                    in_response_to_sync_round: sync_round,
 
                    target_component_id: port.peer_connector,
 
                    content: SyncControlContent::ChannelIsClosed(port.peer_id),
 
                };
 
                self.debug_conn(scheduled.ctx.id, &format!("Sending message to {:?} [shutdown]\n --- {:?}", port.peer_connector, message));
 
                self.runtime.send_message_assumed_alive(port.peer_connector, Message::SyncControl(message));
 
            }
 
        }
 
    }
 

	
 
    /// Handles changes to the context that were made by the component. This is
 
    /// the way (due to Rust's borrowing rules) that we bubble up changes in the
 
    /// component's state that the scheduler needs to know about (e.g. a message
 
    /// that the component wants to send, a port that has been added).
 
    fn handle_changes_in_context(&mut self, scheduled: &mut ScheduledConnector) {
 
        let connector_id = scheduled.ctx.id;
 

	
 
        // Handling any messages that were sent
 
        while let Some(message) = scheduled.ctx.outbox.pop_front() {
 
            let (target_component_id, over_port) = match &message {
 
                Message::Data(content) => {
 
                    // Data messages are always sent to a particular port, and
 
                    // may end up being rerouted.
 
                    let port_desc = scheduled.ctx.get_port_by_id(content.data_header.sending_port).unwrap();
 
                    debug_assert_eq!(port_desc.peer_id, content.data_header.target_port);
 
                    debug_assert_eq!(port_desc.state, PortState::Open); // checked when adding to context
 

	
 
                    (port_desc.peer_connector, true)
 
                },
 
                Message::SyncComp(content) => {
 
                    // Sync messages are always sent to a particular component,
 
                    // the sender must make sure it actually wants to send to
 
                    // the specified component (and is not using an inconsistent
 
                    // component ID associated with a port).
 
                    (content.target_component_id, false)
 
                },
 
                Message::SyncPort(content) => {
 
                    let port_desc = scheduled.ctx.get_port_by_id(content.source_port).unwrap();
 
                    debug_assert_eq!(port_desc.peer_id, content.target_port);
 
                    debug_assert_eq!(port_desc.state, PortState::Open); // checked when adding to context
 

	
 
                    (port_desc.peer_connector, true)
 
                },
 
                Message::SyncControl(_) => unreachable!("component sending 'SyncControl' messages directly"),
 
                Message::Control(_) => unreachable!("component sending 'Control' messages directly"),
 
            };
 

	
 
            self.debug_conn(connector_id, &format!("Sending message to {:?} [outbox, over port: {}] \n --- {:#?}", target_component_id, over_port, message));
 
            if over_port {
 
                self.runtime.send_message_assumed_alive(target_component_id, message);
 
            } else {
 
                self.runtime.send_message_maybe_destroyed(target_component_id, message);
 
            }
 
        }
 

	
 
        while let Some(state_change) = scheduled.ctx.state_changes.pop_front() {
 
            match state_change {
 
                ComponentStateChange::CreatedComponent(component, initial_ports) => {
 
                    // Creating a new component. Need to relinquish control of
 
                    // the ports.
 
                    let new_component_key = self.runtime.create_pdl_component(component, false);
 
                    let new_connector = self.runtime.get_component_private(&new_component_key);
 

	
 
                    // First pass: transfer ports and the associated messages,
 
                    // also count the number of ports that have peers
 
                    let mut num_peers = 0;
 
                    for port_id in initial_ports {
 
                        // Transfer messages associated with the transferred port
 
                        scheduled.ctx.inbox.transfer_messages_for_port(port_id, &mut new_connector.ctx.inbox);
 

	
 
                        // Transfer the port itself
 
                        let port_index = scheduled.ctx.ports.iter()
 
                            .position(|v| v.self_id == port_id)
 
                            .unwrap();
 
                        let port = scheduled.ctx.ports.remove(port_index);
 
                        new_connector.ctx.ports.push(port.clone());
 

	
 
                        if port.state == PortState::Open {
 
                            num_peers += 1;
 
                        }
 
                    }
 

	
 
                    if num_peers == 0 {
 
                        // No peers to notify, so just schedule the component
 
                        self.runtime.push_work(new_component_key);
 
                    } else {
 
                        // Some peers to notify
 
                        let new_component_id = new_component_key.downcast();
 
                        let control_id = scheduled.router.prepare_new_component(new_component_key);
 
                        for port in new_connector.ctx.ports.iter() {
 
                            if port.state == PortState::Closed {
 
                                continue;
 
                            }
 

	
 
                            let control_message = scheduled.router.prepare_changed_port_peer(
 
                                control_id, scheduled.ctx.id,
 
                                port.peer_connector, port.peer_id,
 
                                new_component_id, port.self_id
 
                            );
 
                            self.debug_conn(connector_id, &format!("Sending message to {:?} [newcom]\n --- {:#?}", port.peer_connector, control_message));
 
                            self.runtime.send_message_assumed_alive(port.peer_connector, Message::Control(control_message));
 
                        }
 
                    }
 
                },
 
                ComponentStateChange::CreatedPort(port) => {
 
                    scheduled.ctx.ports.push(port);
 
                },
 
                ComponentStateChange::ChangedPort(port_change) => {
 
                    if port_change.is_acquired {
 
                        scheduled.ctx.ports.push(port_change.port);
 
                    } else {
 
                        let index = scheduled.ctx.ports
 
                            .iter()
 
                            .position(|v| v.self_id == port_change.port.self_id)
 
                            .unwrap();
 
                        scheduled.ctx.ports.remove(index);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Finally, check if we just entered or just left a sync region
 
        if scheduled.ctx.changed_in_sync {
 
            if scheduled.ctx.is_in_sync {
 
                // Just entered sync region
 
            } else {
 
                // Just left sync region. So prepare inbox for the next sync
 
                // round
 
                scheduled.ctx.inbox.clear_read_messages();
 
            }
 

	
 
            scheduled.ctx.changed_in_sync = false; // reset flag
 
        }
 
    }
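    // Note on ordering: the outbox is drained here after every run, so it is
    // empty again by the time `handle_inbox_messages` processes the next batch
    // of control messages (see the debug_assert on `PortPeerChanged` above).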
 

	
 
    fn try_go_to_sleep(&self, connector_key: ConnectorKey, connector: &mut ScheduledConnector) {
 
        debug_assert_eq!(connector_key.index, connector.ctx.id.index);
 
        debug_assert_eq!(connector.public.sleeping.load(Ordering::Acquire), false);
 

	
 
        // This is the running connector, and only the running connector may
 
        // decide it wants to sleep again.
 
        connector.public.sleeping.store(true, Ordering::Release);
 

	
 
        // But due to reordering we might have received messages from peers who
 
        // did not consider us sleeping. If so, then we wake ourselves again.
 
        if !connector.public.inbox.is_empty() {
 
            // Try to wake ourselves up (needed because someone might be trying
 
            // the exact same atomic compare-and-swap at this point in time)
 
            let should_wake_up_again = connector.public.sleeping
 
                .compare_exchange(true, false, Ordering::SeqCst, Ordering::Acquire)
 
                .is_ok();
 

	
 
            if should_wake_up_again {
 
                self.runtime.push_work(connector_key)
 
            }
 
        }
 
    }
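    // For reference, the wake-up side of this handshake: a sender first pushes
    // its message into the component's public inbox and then attempts the
    // mirror-image compare-and-swap on `sleeping` (true -> false), pushing the
    // component onto the work queue if it wins. A hypothetical sender-side
    // sketch (not part of this file):
    //
    //     public.inbox.insert(message);
    //     if public.sleeping
    //         .compare_exchange(true, false, Ordering::SeqCst, Ordering::Acquire)
    //         .is_ok()
    //     {
    //         runtime.push_work(key);
    //     }
    //
    // Exactly one side wins the swap, so a sleeping component can never be left
    // with an unread message in its public inbox.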
 

	
 
    fn debug(&self, message: &str) {
 
        println!("DEBUG [thrd:{:02} conn:  ]: {}", self.scheduler_id, message);
 
    }
 

	
 
    fn debug_conn(&self, conn: ConnectorId, message: &str) {
 
        println!("DEBUG [thrd:{:02} conn:{:02}]: {}", self.scheduler_id, conn.index, message);
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// ComponentCtx
 
// -----------------------------------------------------------------------------
 

	
 
enum ComponentStateChange {
 
    CreatedComponent(ConnectorPDL, Vec<PortIdLocal>),
 
    CreatedPort(Port),
 
    ChangedPort(ComponentPortChange),
 
}
 

	
 
#[derive(Clone)]
 
pub(crate) struct ComponentPortChange {
 
    pub is_acquired: bool, // otherwise: released
 
    pub port: Port,
 
}
 

	
 
/// The component context (better name may be invented). This was created
 
/// because part of the component's state is managed by the scheduler, and part
 
/// of it by the component itself. When the component starts a sync block or
 
/// exits a sync block, the state that is partially managed by the component
/// and partially by the scheduler needs to be exchanged.
 
pub(crate) struct ComponentCtx {
 
    // Mostly managed by the scheduler
 
    pub(crate) id: ConnectorId,
 
    ports: Vec<Port>,
 
    inbox: Inbox,
 
    // Submitted by the component
 
    is_in_sync: bool,
 
    changed_in_sync: bool,
 
    outbox: VecDeque<Message>,
 
    state_changes: VecDeque<ComponentStateChange>,
 

	
 
    // Workspaces that may be used by components to (generally) prevent
 
    // allocations. Be a good scout and leave it empty after you've used it.
 
    // TODO: Move to scheduler ctx, this is the wrong place
 
    pub workspace_ports: Vec<PortIdLocal>,
 
    pub workspace_branches: Vec<BranchId>,
 
}
 

	
 
impl ComponentCtx {
 
    pub(crate) fn new_empty() -> Self {
 
        return Self{
 
            id: ConnectorId::new_invalid(),
 
            ports: Vec::new(),
 
            inbox: Inbox::new(),
 
            is_in_sync: false,
 
            changed_in_sync: false,
 
            outbox: VecDeque::new(),
 
            state_changes: VecDeque::new(),
 
            workspace_ports: Vec::new(),
 
            workspace_branches: Vec::new(),
 
        };
 
    }
 

	
 
    /// Notify the runtime that the component has created a new component. May
 
    /// only be called outside of a sync block.
 
    pub(crate) fn push_component(&mut self, component: ConnectorPDL, initial_ports: Vec<PortIdLocal>) {
 
        debug_assert!(!self.is_in_sync);
 
        self.state_changes.push_back(ComponentStateChange::CreatedComponent(component, initial_ports));
 
    }
 

	
 
    /// Notify the runtime that the component has created a new port. May only
 
    /// be called outside of a sync block (for ports received during a sync
 
    /// block, pass them when calling `notify_sync_end`).
 
    pub(crate) fn push_port(&mut self, port: Port) {
 
        debug_assert!(!self.is_in_sync);
 
        self.state_changes.push_back(ComponentStateChange::CreatedPort(port))
 
    }
 

	
 
    /// Notify the runtime of an error. Note that this will not perform any
 
    /// special action beyond printing the error. The component is responsible
 
    /// for waiting until it is appropriate to shut down (i.e. being outside
 
    /// of a sync region) and returning the `Exit` scheduling code.
 
    pub(crate) fn push_error(&mut self, error: EvalError) {
 
        println!("ERROR: Component ({}) encountered a critical error:\n{}", self.id.index, error);
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn get_ports(&self) -> &[Port] {
 
        return self.ports.as_slice();
 
    }
 

	
 
    pub(crate) fn get_port_by_id(&self, id: PortIdLocal) -> Option<&Port> {
 
        return self.ports.iter().find(|v| v.self_id == id);
 
    }
 

	
 
    pub(crate) fn get_port_by_channel_id(&self, id: ChannelId) -> Option<&Port> {
 
        return self.ports.iter().find(|v| v.channel_id == id);
 
    }
 

	
 
    fn get_port_mut_by_id(&mut self, id: PortIdLocal) -> Option<&mut Port> {
 
        return self.ports.iter_mut().find(|v| v.self_id == id);
 
    }
 

	
 
    /// Notify that the component will enter a sync block. Note that after calling
 
    /// this function you must allow the scheduler to pick up the changes in the
 
    /// context by exiting your code-executing loop, and to continue executing
 
    /// code the next time the scheduler picks up the component.
 
    pub(crate) fn notify_sync_start(&mut self) {
 
        debug_assert!(!self.is_in_sync);
 

	
 
        self.is_in_sync = true;
 
        self.changed_in_sync = true;
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn is_in_sync(&self) -> bool {
 
        return self.is_in_sync;
 
    }
 

	
 
    /// Submit a message for the scheduler to send to the appropriate receiver.
 
    /// May only be called inside of a sync block.
 
    pub(crate) fn submit_message(&mut self, contents: Message) -> Result<(), ()> {
 
        debug_assert!(self.is_in_sync);
 
        if let Some(port_id) = contents.source_port() {
 
            let port_info = self.get_port_by_id(port_id);
 
            let is_valid = match port_info {
 
                Some(port_info) => {
 
                    port_info.state == PortState::Open
 
                },
 
                None => false,
 
            };
 
            if !is_valid {
 
                // The port is closed or not owned by this component
 
                println!(" ****** DEBUG ****** : Sending through closed port!!! {}", port_id.index);
 
                return Err(());
 
            }
 
        }
 

	
 
        self.outbox.push_back(contents);
 
        return Ok(());
 
    }
 

	
 
    /// Notify that the component just finished a sync block. Like
 
    /// `notify_sync_start`: drop out of the `Component::Run` function.
 
    pub(crate) fn notify_sync_end(&mut self, changed_ports: &[ComponentPortChange]) {
 
        debug_assert!(self.is_in_sync);
 

	
 
        self.is_in_sync = false;
 
        self.changed_in_sync = true;
 

	
 
        self.state_changes.reserve(changed_ports.len());
 
        for changed_port in changed_ports {
 
            self.state_changes.push_back(ComponentStateChange::ChangedPort(changed_port.clone()));
 
        }
 
    }
 

	
 
    /// Retrieves the data messages aimed at a particular port, but only those
    /// messages that have previously been read through a message ticket (see
    /// `get_next_message_ticket`).
 
    pub(crate) fn get_read_data_messages(&self, match_port_id: PortIdLocal) -> MessagesIter {
 
        return self.inbox.get_read_data_messages(match_port_id);
 
    }
 

	
 
    pub(crate) fn get_next_message_ticket(&mut self) -> Option<MessageTicket> {
 
        if !self.is_in_sync { return None; }
 
        return self.inbox.get_next_message_ticket();
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn get_next_message_ticket_even_if_not_in_sync(&mut self) -> Option<MessageTicket> {
 
        return self.inbox.get_next_message_ticket();
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn read_message_using_ticket(&self, ticket: MessageTicket) -> &Message {
 
        return self.inbox.read_message_using_ticket(ticket);
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn take_message_using_ticket(&mut self, ticket: MessageTicket) -> Message {
 
        return self.inbox.take_message_using_ticket(ticket)
 
    }
 

	
 
    /// Puts a message back into the inbox because it belongs to the next sync
    /// round. It will become readable again once the current sync round ends
    /// and the read messages are cleared.
 
    pub(crate) fn put_back_message(&mut self, message: Message) {
 
        self.inbox.put_back_message(message);
 
    }
 
}
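// A rough sketch of how a component is expected to drive this context during a
// single sync round (the control flow is illustrative only; the method names
// are the ones defined above):
//
//     ctx.notify_sync_start();                 // then yield back to the scheduler
//     while let Some(ticket) = ctx.get_next_message_ticket() {
//         let msg = ctx.read_message_using_ticket(ticket);
//         // ... speculate, possibly ctx.submit_message(...) ...
//     }
//     ctx.notify_sync_end(&changed_ports);     // again: yield back to the scheduler
//
// `push_component` and `push_port` may only be used outside of a sync round;
// ports that changed hands during the round are reported via `notify_sync_end`.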
 

	
 
pub(crate) struct MessagesIter<'a> {
 
    messages: &'a [Message],
 
    next_index: usize,
 
    max_index: usize,
 
    match_port_id: PortIdLocal,
 
}
 

	
 
impl<'a> Iterator for MessagesIter<'a> {
 
    type Item = &'a DataMessage;
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Loop until match is found or at end of messages
 
        while self.next_index < self.max_index {
 
            let message = &self.messages[self.next_index];
 
            if let Message::Data(message) = &message {
 
                if message.data_header.target_port == self.match_port_id {
 
                    // Found a match
 
                    self.next_index += 1;
 
                    return Some(message);
 
                }
 
            } else {
 
                // Unreachable because:
                //  1. We only iterate over messages that were previously read through a message ticket.
                //  2. The inbox does not contain control/ping messages.
                //  3. If reading encounters anything other than a data message, it is taken out of the inbox.
 
                unreachable!();
 
            }
 

	
 
            self.next_index += 1;
 
        }
 

	
 
        // No more messages
 
        return None;
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Private Inbox
 
// -----------------------------------------------------------------------------
 

	
 
/// A structure that contains inbox messages. Some messages are left inside and
/// continuously re-read. Others are taken out, but may be put back, in which
/// case they become readable again in the next sync round.
 
/// TODO: Again, lazy concurrency, see git history for other implementation
 
struct Inbox {
 
    messages: Vec<Message>,
 
    delayed: Vec<Message>,
 
    next_read_idx: u32,
 
    generation: u32,
 
}
 

	
 
#[derive(Clone, Copy)]
 
pub(crate) struct MessageTicket {
 
    index: u32,
 
    generation: u32,
 
}
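// Note on `generation`: handing out a new ticket bumps the inbox's generation
// counter, and both `read_message_using_ticket` and `take_message_using_ticket`
// assert (in debug builds) that the presented ticket carries the current
// generation. A ticket is therefore only valid until the next ticket is handed
// out; holding on to stale tickets is a programming error that these asserts
// catch.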
 

	
 
impl Inbox {
 
    fn new() -> Self {
 
        return Inbox {
 
            messages: Vec::new(),
 
            delayed: Vec::new(),
 
            next_read_idx: 0,
 
            generation: 0,
 
        }
 
    }
 

	
 
    fn insert_new(&mut self, message: Message) {
 
        assert!(self.messages.len() < u32::MAX as usize); // TODO: @Size
 
        self.messages.push(message);
 
    }
 

	
 
    fn get_next_message_ticket(&mut self) -> Option<MessageTicket> {
 
        if self.next_read_idx as usize >= self.messages.len() { return None };
 
        let idx = self.next_read_idx;
 
        self.generation += 1;
 
        self.next_read_idx += 1;
 
        return Some(MessageTicket{ index: idx, generation: self.generation });
 
    }
 

	
 
    fn read_message_using_ticket(&self, ticket: MessageTicket) -> &Message {
 
        debug_assert_eq!(self.generation, ticket.generation);
 
        return &self.messages[ticket.index as usize];
 
    }
 

	
 
    fn take_message_using_ticket(&mut self, ticket: MessageTicket) -> Message {
 
        debug_assert_eq!(self.generation, ticket.generation);
 
        debug_assert!(ticket.index < self.next_read_idx);
 
        self.next_read_idx -= 1;
 
        return self.messages.remove(ticket.index as usize);
 
    }
 

	
 
    fn put_back_message(&mut self, message: Message) {
 
        // Store the message in the delayed list; `clear_read_messages` merges
        // it back into the inbox for the next sync round.
 
        self.delayed.push(message);
 
    }
 

	
 
    fn get_read_data_messages(&self, match_port_id: PortIdLocal) -> MessagesIter {
 
        return MessagesIter{
 
            messages: self.messages.as_slice(),
 
            next_index: 0,
 
            max_index: self.next_read_idx as usize,
 
            match_port_id
 
        };
 
    }
 

	
 
    fn clear_read_messages(&mut self) {
 
        self.messages.drain(0..self.next_read_idx as usize);
 
        for (idx, v) in self.delayed.drain(..).enumerate() {
 
            self.messages.insert(idx, v);
 
        }
 
        self.next_read_idx = 0;
 
    }
 

	
 
    fn transfer_messages_for_port(&mut self, port: PortIdLocal, new_inbox: &mut Inbox) {
 
        debug_assert!(self.delayed.is_empty());
 
        let mut idx = 0;
 
        while idx < self.messages.len() {
 
            let msg = &self.messages[idx];
 
            if let Some(target) = msg.target_port() {
 
                if target == port {
 
                    new_inbox.messages.push(self.messages.remove(idx));
 
                    continue;
 
                }
 
            }
 

	
 
            idx += 1;
 
        }
 
    }
 
}
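// Lifecycle of an inbox message: the scheduler adds it with `insert_new`, the
// component walks over it through tickets and may take it out (optionally
// putting it back via `put_back_message` when it belongs to a later round).
// When the sync round ends, `clear_read_messages` drops everything that was
// read and re-inserts the delayed messages at the front, ready for the next
// round. When a port moves to a newly created component,
// `transfer_messages_for_port` moves the matching messages along with it.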
 

	
 
// -----------------------------------------------------------------------------
 
// Control messages
 
// -----------------------------------------------------------------------------
 

	
 
struct ControlEntry {
 
    id: u32,
 
    variant: ControlVariant,
 
}
 

	
 
enum ControlVariant {
 
    NewComponent(ControlNewComponent),
 
    ChangedPort(ControlChangedPort),
 
    ClosedChannel(ControlClosedChannel),
 
}
 

	
 
impl ControlVariant {
 
    fn as_new_component_mut(&mut self) -> &mut ControlNewComponent {
 
        match self {
 
            ControlVariant::NewComponent(v) => v,
 
            _ => unreachable!(),
 
        }
 
    }
 
}
 

	
 
/// Entry for a new component waiting for execution after all of its peers have
 
/// confirmed the `ControlChangedPort` messages.
 
struct ControlNewComponent {
 
    num_acks_pending: u32,          // if it hits 0, we schedule the component
 
    component_key: ConnectorKey,    // this is the component we schedule
 
}
 

	
 
struct ControlChangedPort {
 
    reroute_if_sent_to_this_port: PortIdLocal, // if sent to this port, then reroute
 
    source_connector: ConnectorId,             // connector we expect messages from
 
    target_connector: ConnectorId,             // connector we need to reroute to
 
    new_component_entry_id: u32,               // if Ack'd, we reduce the counter on this `ControlNewComponent` entry
 
}
 

	
 
struct ControlClosedChannel {
 
    source_port: PortIdLocal,
 
    target_port: PortIdLocal,
 
}
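// How these entries are used when a component spawns a new component (see
// `handle_changes_in_context` above for the authoritative version; the snippet
// below is an illustrative sketch only):
//
//     let entry_id = router.prepare_new_component(new_component_key);
//     // for every open port handed to the new component:
//     let msg = router.prepare_changed_port_peer(entry_id, /* ids */ ...);
//     // send Message::Control(msg) to the port's current peer
//
// Every peer answers with `ControlContent::Ack`; once `handle_ack` has seen all
// of them it returns the stored ConnectorKey so the new component can finally
// be scheduled.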
 

	
 
pub(crate) struct ControlMessageHandler {
 
    id_counter: u32,
 
    active: Vec<ControlEntry>,
 
}
 

	
 
impl ControlMessageHandler {
 
    pub fn new() -> Self {
 
        ControlMessageHandler {
 
            id_counter: 0,
 
            active: Vec::new(),
 
        }
 
    }
 

	
 
    /// Prepares a message indicating that a channel has closed. We keep a local
 
    /// entry to match against the (hopefully) returned `Ack` message.
 
    pub fn prepare_closing_channel(
 
        &mut self, self_port_id: PortIdLocal, peer_port_id: PortIdLocal,
 
        self_connector_id: ConnectorId
 
    ) -> ControlMessage {
 
        let id = self.take_id();
 

	
 
        self.active.push(ControlEntry{
 
            id,
 
            variant: ControlVariant::ClosedChannel(ControlClosedChannel{
 
                source_port: self_port_id,
 
                target_port: peer_port_id,
 
            }),
 
        });
 

	
 
        return ControlMessage {
 
            id,
 
            sending_component_id: self_connector_id,
 
            content: ControlContent::CloseChannel(peer_port_id),
 
        };
 
    }
 

	
 
    /// Prepares a control entry for a new component. This returns the id of
 
    /// the entry for calls to `prepare_changed_port_peer`. Don't call this
 
    /// function if the component has no peers that need to be messaged.
 
    pub fn prepare_new_component(&mut self, component_key: ConnectorKey) -> u32 {
 
        let id = self.take_id();
 
        self.active.push(ControlEntry{
 
            id,
 
            variant: ControlVariant::NewComponent(ControlNewComponent{
 
                num_acks_pending: 0,
 
                component_key,
 
            }),
 
        });
 

	
 
        return id;
 
    }
 

	
 
    pub fn prepare_changed_port_peer(
 
        &mut self, new_component_entry_id: u32, creating_component_id: ConnectorId,
 
        changed_component_id: ConnectorId, changed_port_id: PortIdLocal,
 
        new_target_component_id: ConnectorId, new_target_port_id: PortIdLocal
 
    ) -> ControlMessage {
 
        // Add the peer-changed entry
 
        let change_port_entry_id = self.take_id();
 
        self.active.push(ControlEntry{
 
            id: change_port_entry_id,
 
            variant: ControlVariant::ChangedPort(ControlChangedPort{
 
                reroute_if_sent_to_this_port: new_target_port_id,
 
                source_connector: changed_component_id,
 
                target_connector: new_target_component_id,
 
                new_component_entry_id,
 
            })
 
        });
 

	
 
        // Increment counter on "new component" entry
 
        let position = self.position(new_component_entry_id).unwrap();
 
        let new_component_entry = &mut self.active[position];
 
        let new_component_entry = new_component_entry.variant.as_new_component_mut();
 
        new_component_entry.num_acks_pending += 1;
 

	
 
        return ControlMessage{
 
            id: change_port_entry_id,
 
            sending_component_id: creating_component_id,
 
            content: ControlContent::PortPeerChanged(changed_port_id, new_target_component_id),
 
        };
 
    }
 

	
 
    /// Checks whether a message aimed at the given port should be rerouted. If
    /// so, this function returns the connector that should receive the message.
 
    pub fn should_reroute(&self, target_port: PortIdLocal) -> Option<ConnectorId> {
 
        for entry in &self.active {
 
            if let ControlVariant::ChangedPort(entry) = &entry.variant {
 
                if entry.reroute_if_sent_to_this_port == target_port {
 
                    // Need to reroute this message
 
                    return Some(entry.target_connector);
 
                }
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    /// Handles an Ack as an answer to a previously sent control message.
    /// If it completes a pending "new component" entry, the key of that
    /// component is returned so that it can be scheduled.
 
    pub fn handle_ack(&mut self, id: u32) -> Option<ConnectorKey> {
 
        let index = self.position(id);
 

	
 
        match index {
 
            Some(index) => {
 
                // Remove the entry. If `ChangedPort`, then retrieve associated
 
                // `NewComponent`. Otherwise: early exit
 
                let removed_entry = self.active.remove(index);
 
                let new_component_idx = match removed_entry.variant {
 
                    ControlVariant::ChangedPort(message) => {
 
                        self.position(message.new_component_entry_id).unwrap()
 
                    },
 
                    _ => return None,
 
                };
 

	
 
                // Decrement counter, if 0, then schedule component
 
                let new_component_entry = self.active[new_component_idx].variant.as_new_component_mut();
 
                new_component_entry.num_acks_pending -= 1;
 
                if new_component_entry.num_acks_pending != 0 {
 
                    return None;
 
                }
 

	
 
                // Return component key for scheduling
 
                let new_component_entry = self.active.remove(new_component_idx);
 
                let new_component_entry = match new_component_entry.variant {
 
                    ControlVariant::NewComponent(entry) => entry,
 
                    _ => unreachable!(),
 
                };
 

	
 
                return Some(new_component_entry.component_key);
 
            },
 
            None => {
 
                todo!("handling of nefarious ACKs");
 
            },
 
        }
 
    }
 

	
 
    /// Retrieves the number of responses we still expect to receive from our
 
    /// peers
 
    #[inline]
 
    pub fn num_pending_acks(&self) -> usize {
 
        return self.active.len();
 
    }
 

	
 
    fn take_id(&mut self) -> u32 {
 
        let generated_id = self.id_counter;
 
        let (new_id, _) = self.id_counter.overflowing_add(1);
 
        self.id_counter = new_id;
 

	
 
        return generated_id;
 
    }
 

	
 
    #[inline]
 
    fn position(&self, id: u32) -> Option<usize> {
 
        return self.active.iter().position(|v| v.id == id);
 
    }
 
}
 
\ No newline at end of file
src/runtime/tests/api_component.rs
Show inline comments
 
file renamed from src/runtime2/tests/api_component.rs to src/runtime/tests/api_component.rs
src/runtime/tests/data_transmission.rs
Show inline comments
 
file renamed from src/runtime2/tests/data_transmission.rs to src/runtime/tests/data_transmission.rs
src/runtime/tests/mod.rs
Show inline comments
 
file renamed from src/runtime2/tests/mod.rs to src/runtime/tests/mod.rs
 
mod network_shapes;
 
mod api_component;
 
mod speculation;
 
mod data_transmission;
 
mod sync_failure;
 

	
 
use super::*;
 
use crate::{PortId, ProtocolDescription};
 
use crate::common::Id;
 
use crate::protocol::eval::*;
 
use crate::runtime2::native::{ApplicationSyncAction};
 
use crate::runtime::native::{ApplicationSyncAction};
 

	
 
// Generic testing constants, use when appropriate to simplify stress-testing
 
// pub(crate) const NUM_THREADS: u32 =  8;     // number of threads in runtime
 
// pub(crate) const NUM_INSTANCES: u32 = 750;  // number of test instances constructed
 
// pub(crate) const NUM_LOOPS: u32 = 10;       // number of loops within a single test (not used by all tests)
 

	
 
pub(crate) const NUM_THREADS: u32 = 6;
 
pub(crate) const NUM_INSTANCES: u32 = 2;
 
pub(crate) const NUM_LOOPS: u32 = 1;
 

	
 

	
 
fn create_runtime(pdl: &str) -> Runtime {
 
    let protocol = ProtocolDescription::parse(pdl.as_bytes()).expect("parse pdl");
 
    let runtime = Runtime::new(NUM_THREADS, protocol);
 

	
 
    return runtime;
 
}
 

	
 
fn run_test_in_runtime<F: Fn(&mut ApplicationInterface)>(pdl: &str, constructor: F) {
 
    let protocol = ProtocolDescription::parse(pdl.as_bytes())
 
        .expect("parse PDL");
 
    let runtime = Runtime::new(NUM_THREADS, protocol);
 

	
 
    let mut api = runtime.create_interface();
 
    for _ in 0..NUM_INSTANCES {
 
        constructor(&mut api);
 
    }
 
}
 

	
 
pub(crate) struct TestTimer {
 
    name: &'static str,
 
    started: std::time::Instant
 
}
 

	
 
impl TestTimer {
 
    pub(crate) fn new(name: &'static str) -> Self {
 
        Self{ name, started: std::time::Instant::now() }
 
    }
 
}
 

	
 
impl Drop for TestTimer {
 
    fn drop(&mut self) {
 
        let delta = std::time::Instant::now() - self.started;
 
        let micros = (delta.as_secs_f64() * 1_000_000.0) as u64;
        let millis = micros / 1000;
        let micros = micros % 1000;
        println!("[{}] Took {:>4}.{:03} ms", self.name, millis, micros);
 
    }
 
}
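// TestTimer acts as a simple scope guard: it prints the elapsed time when it is
// dropped. Typical use (illustrative):
//
//     let _timer = TestTimer::new("my_test");
//     // ... body of the test ...
//     // elapsed time is printed when `_timer` goes out of scope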
src/runtime/tests/network_shapes.rs
Show inline comments
 
file renamed from src/runtime2/tests/network_shapes.rs to src/runtime/tests/network_shapes.rs
src/runtime/tests/speculation.rs
Show inline comments
 
file renamed from src/runtime2/tests/speculation.rs to src/runtime/tests/speculation.rs
src/runtime/tests/sync_failure.rs
Show inline comments
 
file renamed from src/runtime2/tests/sync_failure.rs to src/runtime/tests/sync_failure.rs
src/runtime2/mod.rs
Show inline comments
 
deleted file
src/runtime_old/communication.rs
Show inline comments
 
file renamed from src/runtime/communication.rs to src/runtime_old/communication.rs
src/runtime_old/endpoints.rs
Show inline comments
 
file renamed from src/runtime/endpoints.rs to src/runtime_old/endpoints.rs
src/runtime_old/error.rs
Show inline comments
 
file renamed from src/runtime/error.rs to src/runtime_old/error.rs
src/runtime_old/logging.rs
Show inline comments
 
file renamed from src/runtime/logging.rs to src/runtime_old/logging.rs
src/runtime_old/mod.rs
Show inline comments
 
new file 100644
 
/// cbindgen:ignore
 
mod communication;
 
/// cbindgen:ignore
 
mod endpoints;
 
pub mod error;
 
/// cbindgen:ignore
 
mod logging;
 
/// cbindgen:ignore
 
mod setup;
 

	
 
#[cfg(test)]
 
mod tests;
 

	
 
use crate::common::*;
 
use error::*;
 
use mio::net::UdpSocket;
 

	
 
/// The interface between the user's application and a communication session,
 
/// in which the application plays the part of a (native) component. This structure provides the application
 
/// with functionality available to all components: the ability to add new channels (port pairs), and to
 
/// instantiate new components whose definitions are given in the connector's configured protocol
/// description. Native components have the additional ability to add 'dangling' ports backed by local/remote
 
/// IP addresses, to be coupled with a counterpart once the connector's setup is completed by `connect`.
 
/// This allows sets of applications to cooperate in constructing shared sessions that span the network.
 
#[derive(Debug)]
 
pub struct Connector {
 
    unphased: ConnectorUnphased,
 
    phased: ConnectorPhased,
 
}
 

	
 
/// Characterizes a type which can write lines of logging text.
 
/// The implementations provided in the `logging` module are likely to be sufficient,
 
/// but for added flexibility, users are able to implement their own loggers for use
 
/// by connectors.
 
pub trait Logger: Debug + Send + Sync {
 
    fn line_writer(&mut self) -> Option<&mut dyn std::io::Write>;
 
}
 

	
 
/// A logger that appends the logged strings to a growing byte buffer
 
#[derive(Debug)]
 
pub struct VecLogger(ConnectorId, Vec<u8>);
 

	
 
/// A trivial logger that always returns None, such that no logging information is ever written.
 
#[derive(Debug)]
 
pub struct DummyLogger;
 

	
 
/// A logger that writes the logged lines to a given file.
 
#[derive(Debug)]
 
pub struct FileLogger(ConnectorId, std::fs::File);
 

	
 
// Interface between protocol state and the connector runtime BEFORE all components
 
// have begun their branching speculation. See ComponentState::nonsync_run.
 
pub(crate) struct NonsyncProtoContext<'a> {
 
    ips: &'a mut IdAndPortState,
 
    logger: &'a mut dyn Logger,
 
    unrun_components: &'a mut Vec<(ComponentId, ComponentState)>, // lives for Nonsync phase
 
    proto_component_id: ComponentId,                              // KEY in id->component map
 
}
 

	
 
// Interface between protocol state and the connector runtime AFTER all components
 
// have begun their branching speculation. See ComponentState::sync_run.
 
pub(crate) struct SyncProtoContext<'a> {
 
    rctx: &'a RoundCtx,
 
    branch_inner: &'a mut ProtoComponentBranchInner, // sub-structure of component branch
 
    predicate: &'a Predicate,                        // KEY in pred->branch map
 
}
 

	
 
// The data coupled with a particular protocol component branch, but crucially omitting
 
// the `ComponentState` such that this may be passed by reference to the state with separate
 
// access control.
 
#[derive(Default, Debug, Clone)]
 
struct ProtoComponentBranchInner {
 
    did_put_or_get: HashSet<PortId>,
 
    inbox: HashMap<PortId, Payload>,
 
}
 

	
 
// A speculative variable that lives for the duration of the synchronous round.
 
// Each is assigned a value in domain `SpecVal`.
 
#[derive(
 
    Copy, Clone, Eq, PartialEq, Ord, Hash, PartialOrd, serde::Serialize, serde::Deserialize,
 
)]
 
struct SpecVar(PortId);
 

	
 
// The codomain of a SpecVar assignment. Has two associated constants for values FIRING and SILENT,
 
// but may also enumerate many more values to facilitate finer-grained nondeterministic branching.
 
#[derive(
 
    Copy, Clone, Eq, PartialEq, Ord, Hash, PartialOrd, serde::Serialize, serde::Deserialize,
 
)]
 
struct SpecVal(u16);
 

	
 
// Data associated with a successful synchronous round, retained afterwards such that the
 
// native component can freely reflect on how it went, reading the messages received at their
 
// inputs, and reflecting on which of their connector's synchronous batches succeeded.
 
#[derive(Debug)]
 
struct RoundEndedNative {
 
    batch_index: usize,
 
    gotten: HashMap<PortId, Payload>,
 
}
 

	
 
// Implementation of a set in terms of a vector (optimized for reading, not writing)
 
#[derive(Default)]
 
struct VecSet<T: std::cmp::Ord> {
 
    // invariant: ordered, deduplicated
 
    vec: Vec<T>,
 
}
 

	
 
// Allows a connector to remember how to forward payloads towards the component that
 
// owns their destination port. `LocalComponent` corresponds with messages for components
 
// managed by the connector itself (hinting for it to look it up in a local structure),
 
// whereas the other variants direct the connector to forward the messages over the network.
 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 
enum Route {
 
    LocalComponent,
 
    NetEndpoint { index: usize },
 
    UdpEndpoint { index: usize },
 
}
 

	
 
// The outcome of a synchronous round, representing the distributed consensus.
 
// In the success case, the attached predicate encodes a row in the session's trace table.
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
enum Decision {
 
    Failure, // some connector timed out!
 
    Success(Predicate),
 
}
 

	
 
// The type of control messages exchanged between connectors over the network
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum Msg {
 
    SetupMsg(SetupMsg),
 
    CommMsg(CommMsg),
 
}
 

	
 
// Control messages exchanged during the setup phase only
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum SetupMsg {
 
    MyPortInfo(MyPortInfo),
 
    LeaderWave { wave_leader: ConnectorId },
 
    LeaderAnnounce { tree_leader: ConnectorId },
 
    YouAreMyParent,
 
}
 

	
 
// Control message particular to the communication phase.
 
// as such, it's annotated with a round_index
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct CommMsg {
 
    round_index: usize,
 
    contents: CommMsgContents,
 
}
 

	
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum CommMsgContents {
 
    SendPayload(SendPayloadMsg),
 
    CommCtrl(CommCtrlMsg),
 
}
 

	
 
// Connector <-> connector control messages for use in the communication phase
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
enum CommCtrlMsg {
 
    Suggest { suggestion: Decision }, // child->parent
 
    Announce { decision: Decision },  // parent->child
 
}
 

	
 
// Speculative payload message, communicating the value for the given
 
// port's message predicated on the given speculative variable assignments.
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct SendPayloadMsg {
 
    predicate: Predicate,
 
    payload: Payload,
 
}
 

	
 
// Return result of `Predicate::assignment_union`, communicating the contents
 
// of the predicate which represents the (consistent) union of their mappings,
 
// if it exists (no variable mapped distinctly by the input predicates)
 
#[derive(Debug, PartialEq)]
 
enum AssignmentUnionResult {
 
    FormerNotLatter,
 
    LatterNotFormer,
 
    Equivalent,
 
    New(Predicate),
 
    Nonexistant,
 
}
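// Worked example (illustrative): for former = {x: FIRING} and
// latter = {x: FIRING, y: SILENT} the union of the mappings equals the latter,
// so the result reports that one predicate already subsumes the other;
// identical predicates yield Equivalent; for former = {x: FIRING} and
// latter = {y: SILENT} the union is a genuinely new predicate, i.e.
// New({x: FIRING, y: SILENT}); and for former = {x: FIRING} versus
// latter = {x: SILENT} the variable x is mapped distinctly, so the result is
// Nonexistant.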
 

	
 
// One of two endpoints for a control channel with a connector on either end.
 
// The underlying transport is TCP, so we use an inbox buffer to allow
 
// discrete payload receipt.
 
struct NetEndpoint {
 
    inbox: Vec<u8>,
 
    stream: TcpStream,
 
}
 

	
 
// Data structure used during the setup phase representing a NetEndpoint TO BE SET UP
 
#[derive(Debug, Clone)]
 
struct NetEndpointSetup {
 
    getter_for_incoming: PortId,
 
    sock_addr: SocketAddr,
 
    endpoint_polarity: EndpointPolarity,
 
}
 

	
 
// Data structure used during the setup phase representing a UdpEndpoint TO BE SET UP
 
#[derive(Debug, Clone)]
 
struct UdpEndpointSetup {
 
    getter_for_incoming: PortId,
 
    local_addr: SocketAddr,
 
    peer_addr: SocketAddr,
 
}
 

	
 
// NetEndpoint annotated with the ID of the port that receives payload
 
// messages received through the endpoint. This approach assumes that NetEndpoints
 
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
 
// As a result, the messages themselves don't need to carry the PortID with them.
 
#[derive(Debug)]
 
struct NetEndpointExt {
 
    net_endpoint: NetEndpoint,
 
    getter_for_incoming: PortId,
 
}
 

	
 
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
 
// described in the literature.
 
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
 
// It acts as a native component by managing a (speculative) set of payload messages (an outbox,
 
//  protecting the peer on the other side of the network).
 
#[derive(Debug)]
 
struct UdpEndpointExt {
 
    sock: UdpSocket, // already bound and connected
 
    received_this_round: bool,
 
    outgoing_payloads: HashMap<Predicate, Payload>,
 
    getter_for_incoming: PortId,
 
}
 

	
 
// Meta-data for the connector: its role in the consensus tree.
 
#[derive(Debug)]
 
struct Neighborhood {
 
    parent: Option<usize>,
 
    children: VecSet<usize>,
 
}
 

	
 
// Manages the connector's ID, and manages allocations for connector/port IDs.
 
#[derive(Debug, Clone)]
 
struct IdManager {
 
    connector_id: ConnectorId,
 
    port_suffix_stream: U32Stream,
 
    component_suffix_stream: U32Stream,
 
}
 

	
 
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
 
struct IoByteBuffer {
 
    byte_vec: Vec<u8>,
 
}
 

	
 
// A generator of speculative variables. Created on-demand during the synchronous round
 
// by the IdManager.
 
#[derive(Debug)]
 
struct SpecVarStream {
 
    connector_id: ConnectorId,
 
    port_suffix_stream: U32Stream,
 
}
 

	
 
// Manages the messy state of the various endpoints, pollers, buffers, etc.
 
#[derive(Debug)]
 
struct EndpointManager {
 
    // invariants:
 
    // 1. net and udp endpoints are registered with poll with tokens computed with TokenTarget::into
 
    // 2. Events is empty
 
    poll: Poll,
 
    events: Events,
 
    delayed_messages: Vec<(usize, Msg)>,
 
    undelayed_messages: Vec<(usize, Msg)>, // ready to yield
 
    net_endpoint_store: EndpointStore<NetEndpointExt>,
 
    udp_endpoint_store: EndpointStore<UdpEndpointExt>,
 
    io_byte_buffer: IoByteBuffer,
 
}
 

	
 
// A storage of endpoints, which keeps track of which endpoints have raised
 
// an event during poll(), signifying that they need to be checked for new incoming data
 
#[derive(Debug)]
 
struct EndpointStore<T> {
 
    endpoint_exts: Vec<T>,
 
    polled_undrained: VecSet<usize>,
 
}
 

	
 
// The information associated with a port identifier, designed for local storage.
 
#[derive(Clone, Debug)]
 
struct PortInfo {
 
    owner: ComponentId,
 
    peer: Option<PortId>,
 
    polarity: Polarity,
 
    route: Route,
 
}
 

	
 
// Similar to `PortInfo`, but designed for communication during the setup procedure.
 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
 
struct MyPortInfo {
 
    polarity: Polarity,
 
    port: PortId,
 
    owner: ComponentId,
 
}
 

	
 
// Newtype around port info map, allowing the implementation of some
 
// useful methods
 
#[derive(Default, Debug, Clone)]
 
struct PortInfoMap {
 
    // invariant: self.invariant_preserved()
 
    // `owned` is redundant information, allowing for fast lookup
 
    // of a component's owned ports (which occurs during the sync round a lot)
 
    map: HashMap<PortId, PortInfo>,
 
    owned: HashMap<ComponentId, HashSet<PortId>>,
 
}
 

	
 
// A convenient substructure for containing port info and the ID manager.
 
// Houses the bulk of the connector's persistent state between rounds.
 
// It turns out several situations require access to both things.
 
#[derive(Debug, Clone)]
 
struct IdAndPortState {
 
    port_info: PortInfoMap,
 
    id_manager: IdManager,
 
}
 

	
 
// A connector's communication-phase-specific data
 
#[derive(Debug)]
 
struct ConnectorCommunication {
 
    round_index: usize,
 
    endpoint_manager: EndpointManager,
 
    neighborhood: Neighborhood,
 
    native_batches: Vec<NativeBatch>,
 
    round_result: Result<Option<RoundEndedNative>, SyncError>,
 
}
 

	
 
// A connector's data common to both setup and communication phases
 
#[derive(Debug)]
 
struct ConnectorUnphased {
 
    proto_description: Arc<ProtocolDescription>,
 
    proto_components: HashMap<ComponentId, ComponentState>,
 
    logger: Box<dyn Logger>,
 
    ips: IdAndPortState,
 
    native_component_id: ComponentId,
 
}
 

	
 
// A connector's phase-specific data
 
#[derive(Debug)]
 
enum ConnectorPhased {
 
    Setup(Box<ConnectorSetup>),
 
    Communication(Box<ConnectorCommunication>),
 
}
 

	
 
// A connector's setup-phase-specific data
 
#[derive(Debug)]
 
struct ConnectorSetup {
 
    net_endpoint_setups: Vec<NetEndpointSetup>,
 
    udp_endpoint_setups: Vec<UdpEndpointSetup>,
 
}
 

	
 
// A newtype wrapper for a map from speculative variable to speculative value
 
// A missing mapping corresponds with "unspecified".
 
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
 
struct Predicate {
 
    assigned: BTreeMap<SpecVar, SpecVal>,
 
}
 

	
 
// Identifies a child of this connector in the _solution tree_.
 
// Each connector creates its own local solutions for the consensus procedure during `sync`,
 
// from the solutions of its children. Those children are either locally-managed components,
 
// (which are leaves in the solution tree), or other connectors reachable through the given
 
// network endpoint (which are internal nodes in the solution tree).
 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 
enum SubtreeId {
 
    LocalComponent(ComponentId),
 
    NetEndpoint { index: usize },
 
}
 

	
 
// An accumulation of the connector's knowledge of all (a) the local solutions its children
 
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
 
// This structure starts off each round with an empty set, and accumulates solutions as they are found
 
// by local components, or received over the network in control messages.
 
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
 
// say that these sets GROW until the round is over, and all solutions are reset.
 
#[derive(Debug)]
 
struct SolutionStorage {
 
    // invariant: old_local U new_local solutions are those that can be created from
 
    // the UNION of one element from each set in `subtree_solutions`.
 
    // invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
 
    old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
 
    new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
 
    // this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
 
    subtree_solutions: Vec<HashSet<Predicate>>,
 
    subtree_id_to_index: HashMap<SubtreeId, usize>,
 
}
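// A sketch of how the invariant could be maintained (hypothetical pseudo-code, not the
// actual implementation): when a predicate `new` enters `subtree_solutions[i]`, try to
// union it with one element drawn from each *other* subtree set; every successful union
// becomes a candidate local solution.
//
//     for others in one_predicate_from_each_other_set(i) {   // hypothetical helper
//         let mut acc = Some(new.clone());
//         for p in others {
//             acc = acc.and_then(|a| a.union_with(p));
//         }
//         if let Some(solution) = acc {
//             storage.new_local.insert(solution);             // grows until the round ends
//         }
//     }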
 

	
 
// Stores the transient data of a synchronous round.
 
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
 
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
 
// and can be undone if the round fails.
 
struct RoundCtx {
 
    solution_storage: SolutionStorage,
 
    spec_var_stream: SpecVarStream,
 
    payload_inbox: Vec<(PortId, SendPayloadMsg)>,
 
    deadline: Option<Instant>,
 
    ips: IdAndPortState,
 
}
 

	
 
// A trait intended to limit the access of the ConnectorUnphased structure
 
// such that we don't accidentally modify any important component/port data
 
// while the results of the round are undecided. Why? Any actions during Connector::sync
 
// are _speculative_ until the round is decided, and we need a safe way of rolling
 
// back any changes.
 
trait CuUndecided {
 
    fn logger(&mut self) -> &mut dyn Logger;
 
    fn proto_description(&self) -> &ProtocolDescription;
 
    fn native_component_id(&self) -> ComponentId;
 
    fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
 
    fn logger_and_protocol_components(
 
        &mut self,
 
    ) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
 
}
 

	
 
// Represents a set of synchronous port operations that the native component
 
// has described as an "option" for completing during the synchronous rounds.
 
// Operations contained here succeed together or not at all.
 
// A native with N >= 2 batches is expressing an N-way nondeterministic choice
 
#[derive(Debug, Default)]
 
struct NativeBatch {
 
    // invariant: putters' and getters' polarities respected
 
    to_put: HashMap<PortId, Payload>,
 
    to_get: HashSet<PortId>,
 
}
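// For example, a native willing to either send `payload` on port `p_out` or do nothing
// at all this round could submit two batches (`p_out` and `payload` are hypothetical):
//
//     let send = NativeBatch {
//         to_put: vec![(p_out, payload)].into_iter().collect(),
//         ..Default::default()
//     };
//     let skip = NativeBatch::default(); // the empty batch: no puts, no gets
//     // submitting [send, skip] expresses a 2-way nondeterministic choice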
 

	
 
// Parallels a mio::Token type, but more clearly communicates
 
// the way it identifies the evented structure it corresponds to.
 
// See runtime/setup for methods converting between TokenTarget and mio::Token
 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
 
enum TokenTarget {
 
    NetEndpoint { index: usize },
 
    UdpEndpoint { index: usize },
 
}
 

	
 
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
 
// such that it can know when to continue polling, and when to block.
 
enum CommRecvOk {
 
    TimeoutWithoutNew,
 
    NewPayloadMsgs,
 
    NewControlMsg { net_index: usize, msg: CommCtrlMsg },
 
}
 
////////////////
 
fn err_would_block(err: &std::io::Error) -> bool {
 
    err.kind() == std::io::ErrorKind::WouldBlock
 
}
 
impl<T: std::cmp::Ord> VecSet<T> {
 
    fn new(mut vec: Vec<T>) -> Self {
 
        // establish the invariant
 
        vec.sort();
 
        vec.dedup();
 
        Self { vec }
 
    }
 
    fn contains(&self, element: &T) -> bool {
 
        self.vec.binary_search(element).is_ok()
 
    }
 
    // Insert the given element. Returns true if it was newly inserted (i.e. not already present).
 
    fn insert(&mut self, element: T) -> bool {
 
        match self.vec.binary_search(&element) {
 
            Ok(_) => false,
 
            Err(index) => {
 
                self.vec.insert(index, element);
 
                true
 
            }
 
        }
 
    }
 
    fn iter(&self) -> std::slice::Iter<T> {
 
        self.vec.iter()
 
    }
 
    fn pop(&mut self) -> Option<T> {
 
        self.vec.pop()
 
    }
 
}
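// Usage sketch (values illustrative): `new` establishes the sorted, deduplicated invariant
// up front; lookups then use binary search, and insertion shifts elements to keep the vec sorted.
//
//     let mut set = VecSet::new(vec![3u32, 1, 3, 2]); // stored as [1, 2, 3]
//     assert!(set.contains(&2));
//     assert!(set.insert(4));  // newly inserted => true
//     assert!(!set.insert(4)); // already present => false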
 
impl PortInfoMap {
 
    fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
 
        self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
 
    }
 
    fn spec_var_for(&self, port: PortId) -> SpecVar {
 
        // Every port maps to a speculative variable
 
        // Two distinct ports map to the same variable
 
        // IFF they are two ends of the same logical channel.
 
        let info = self.map.get(&port).unwrap();
 
        SpecVar(match info.polarity {
 
            Getter => port,
 
            Putter => info.peer.unwrap(),
 
        })
 
    }
 
    fn invariant_preserved(&self) -> bool {
 
        // for every port P with some owner O,
 
        // P is in O's owned set
 
        for (port, info) in self.map.iter() {
 
            match self.owned.get(&info.owner) {
 
                Some(set) if set.contains(port) => {}
 
                _ => {
 
                    println!("{:#?}\n WITH port {:?}", self, port);
 
                    return false;
 
                }
 
            }
 
        }
 
        // for every port P owned by every owner O,
 
        // P's owner is O
 
        for (&owner, set) in self.owned.iter() {
 
            for port in set {
 
                match self.map.get(port) {
 
                    Some(info) if info.owner == owner => {}
 
                    _ => {
 
                        println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
 
                        return false;
 
                    }
 
                }
 
            }
 
        }
 
        true
 
    }
 
}
 
impl SpecVarStream {
 
    fn next(&mut self) -> SpecVar {
 
        let phantom_port: PortId =
 
            Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
 
                .into();
 
        SpecVar(phantom_port)
 
    }
 
}
 
impl IdManager {
 
    fn new(connector_id: ConnectorId) -> Self {
 
        Self {
 
            connector_id,
 
            port_suffix_stream: Default::default(),
 
            component_suffix_stream: Default::default(),
 
        }
 
    }
 
    fn new_spec_var_stream(&self) -> SpecVarStream {
 
        // Spec var stream starts where the current port_id stream ends, with a gap of SKIP_N.
 
        // This gap is entirely unnecessary (i.e. 0 is fine)
 
        // Its purpose is only to make SpecVars easier to spot in logs.
 
        // E.g. spot the spec var: { v0_0, v1_2, v1_103 }
 
        const SKIP_N: u32 = 100;
 
        let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
 
        SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
 
    }
 
    fn new_port_id(&mut self) -> PortId {
 
        Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
 
    }
 
    fn new_component_id(&mut self) -> ComponentId {
 
        Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
 
            .into()
 
    }
 
}
 
impl Drop for Connector {
 
    fn drop(&mut self) {
 
        log!(self.unphased.logger(), "Connector dropping. Goodbye!");
 
    }
 
}
 
// Given a slice of ports, return the first port (if any) that occurs more than once
 
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
 
    let mut vec = Vec::with_capacity(slice.len());
 
    for port in slice.iter() {
 
        match vec.binary_search(port) {
 
            Err(index) => vec.insert(index, *port),
 
            Ok(_) => return Some(*port),
 
        }
 
    }
 
    None
 
}
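// Usage sketch (port values hypothetical):
//
//     // duplicate_port(&[a, b, a, c]) == Some(a)
//     // duplicate_port(&[a, b, c])    == None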
 
impl Connector {
 
    /// Generate a random connector identifier from the system's source of randomness.
 
    pub fn random_id() -> ConnectorId {
 
        type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
 
        unsafe {
 
            let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
 
            // getrandom is the canonical crate for a small, secure rng
 
            getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
 
            // safe! representations of all valid Bytes8 values are valid ConnectorId values
 
            std::mem::transmute::<_, _>(bytes.assume_init())
 
        }
 
    }
 

	
 
    /// Returns true iff the connector is in the connected state, i.e., its setup phase is complete,
 
    /// and it is ready to participate in synchronous rounds of communication.
 
    pub fn is_connected(&self) -> bool {
 
        // If designed for Rust usage, connectors would be exposed as an enum type from the start.
 
        // Consequently, this "phased" business would also include connector variants and this would
 
        // get a lot closer to the connector impl. itself.
 
        // Instead, the C-oriented implementation doesn't distinguish connector states as types,
 
        // and distinguishes them as enum variants instead.
 
        match self.phased {
 
            ConnectorPhased::Setup(..) => false,
 
            ConnectorPhased::Communication(..) => true,
 
        }
 
    }
 

	
 
    /// Enables the connector's current logger to be swapped out for another
 
    pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
 
        std::mem::swap(&mut self.unphased.logger, &mut new_logger);
 
        new_logger
 
    }
 

	
 
    /// Access the connector's current logger
 
    pub fn get_logger(&mut self) -> &mut dyn Logger {
 
        &mut *self.unphased.logger
 
    }
 

	
 
    /// Create a new synchronous channel, returning its ends as a pair of ports,
 
    /// with polarity output, input respectively. Available during either setup/communication phase.
 
    /// # Panics
 
    /// This function panics if the connector's (large) port id space is exhausted.
 
    pub fn new_port_pair(&mut self) -> [PortId; 2] {
 
        let cu = &mut self.unphased;
 
        // adds two new associated ports, related to each other, and exposed to the native
 
        let mut new_cid = || cu.ips.id_manager.new_port_id();
 
        // allocate two fresh port identifiers
 
        let [o, i] = [new_cid(), new_cid()];
 
        // store info for each:
 
        // - they are each others' peers
 
        // - they are owned by a local component with id `cid`
 
        // - polarity putter, getter respectively
 
        cu.ips.port_info.map.insert(
 
            o,
 
            PortInfo {
 
                route: Route::LocalComponent,
 
                peer: Some(i),
 
                owner: cu.native_component_id,
 
                polarity: Putter,
 
            },
 
        );
 
        cu.ips.port_info.map.insert(
 
            i,
 
            PortInfo {
 
                route: Route::LocalComponent,
 
                peer: Some(o),
 
                owner: cu.native_component_id,
 
                polarity: Getter,
 
            },
 
        );
 
        cu.ips
 
            .port_info
 
            .owned
 
            .entry(cu.native_component_id)
 
            .or_default()
 
            .extend([o, i].iter().copied());
 

	
 
        log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
 
        [o, i]
 
    }
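    // Usage sketch (assuming `connector` is a `Connector` value): the returned pair forms one
    // logical channel, so a payload put on `o` during a sync round is received on `i`.
    //
    //     let [o, i] = connector.new_port_pair();
    //     // `o` has polarity Putter, `i` has polarity Getter; both start owned by the native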
 

	
 
    /// Instantiates a new component for the connector runtime to manage, moving
 
    /// the given set of ports from the interface of the native component, to that of the
 
    /// newly created component (transferring their ownership).
 
    /// # Errors
 
    /// Error is returned if the moved ports are not owned by the native component,
 
    /// if the given component name is not defined in the connector's protocol,
 
    /// the given sequence of ports contains a duplicate port,
 
    /// or if the component is unfit for instantiation with the given port sequence.
 
    /// # Panics
 
    /// This function panics if the connector's (large) component id space is exhausted.
 
    pub fn add_component(
 
        &mut self,
 
        module_name: &[u8],
 
        identifier: &[u8],
 
        ports: &[PortId],
 
    ) -> Result<(), AddComponentError> {
 
        // Check for error cases first before modifying `cu`
 
        use AddComponentError as Ace;
 
        let cu = &self.unphased;
 
        if let Some(port) = duplicate_port(ports) {
 
            return Err(Ace::DuplicatePort(port));
 
        }
 
        let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
 
        if expected_polarities.len() != ports.len() {
 
            return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
 
        }
 
        for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
 
            let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
 
            if info.owner != cu.native_component_id {
 
                return Err(Ace::UnknownPort(port));
 
            }
 
            if info.polarity != expected_polarity {
 
                return Err(Ace::WrongPortPolarity { port, expected_polarity });
 
            }
 
        }
 
        // No errors! Time to modify `cu`
 
        // create a new component and identifier
 
        let Connector { phased, unphased: cu } = self;
 
        let new_cid = cu.ips.id_manager.new_component_id();
 
        cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
 
        // update the ownership of moved ports
 
        for port in ports.iter() {
 
            match cu.ips.port_info.map.get_mut(port) {
 
                Some(port_info) => port_info.owner = new_cid,
 
                None => unreachable!(),
 
            }
 
        }
 
        if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
 
            set.retain(|x| !ports.contains(x));
 
        }
 
        let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
 
        if let ConnectorPhased::Communication(comm) = phased {
 
            // Preserve invariant: batches only reason about native's ports.
 
            // Remove batch puts/gets for moved ports.
 
            for batch in comm.native_batches.iter_mut() {
 
                batch.to_put.retain(|port, _| !moved_port_set.contains(port));
 
                batch.to_get.retain(|port| !moved_port_set.contains(port));
 
            }
 
        }
 
        cu.ips.port_info.owned.insert(new_cid, moved_port_set);
 
        Ok(())
 
    }
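    // Usage sketch (module and component names are hypothetical; both must be defined in the
    // loaded protocol description, and `ports` must be native-owned with matching polarities):
    //
    //     let [o, i] = connector.new_port_pair();
    //     connector.add_component(b"my_module", b"forwarder", &[i, o])?;
    //     // ownership of `i` and `o` moves from the native to the new component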
 
}
 
impl Predicate {
 
    #[inline]
 
    pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
 
        Self::default().inserted(k, v)
 
    }
 
    #[inline]
 
    pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
 
        self.assigned.insert(k, v);
 
        self
 
    }
 

	
 
    // Returns whether `self` is a subset of `maybe_superset`
 
    pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
 
        for (var, val) in self.assigned.iter() {
 
            match maybe_superset.assigned.get(var) {
 
                Some(val2) if val2 == val => {}
 
                _ => return false, // var unmapped, or mapped differently
 
            }
 
        }
 
        // `maybe_superset` mirrored all my assignments!
 
        true
 
    }
 

	
 
    /// Given the two predicates {self, other}, computes the union of their
 
    /// assignments, if it exists, and how that union relates to the two inputs.
 
    fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
 
        use AssignmentUnionResult as Aur;
 
        // iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
 
        let [mut s_it, mut o_it] = [self.assigned.iter(), other.assigned.iter()];
 
        let [mut s, mut o] = [s_it.next(), o_it.next()];
 
        // populate lists of assignments in self but not other and vice versa.
 
        // do this by incrementally unfolding the iterators, keeping an eye
 
        // on the ordering between the head elements [s, o].
 
        // whenever s<o, other is certainly missing element 's', etc.
 
        let [mut s_not_o, mut o_not_s] = [vec![], vec![]];
 
        loop {
 
            match [s, o] {
 
                [None, None] => break, // both iterators are empty
 
                [None, Some(x)] => {
 
                    // self's iterator is empty.
 
                    // all remaining elements are in other but not self
 
                    o_not_s.push(x);
 
                    o_not_s.extend(o_it);
 
                    break;
 
                }
 
                [Some(x), None] => {
 
                    // other's iterator is empty.
 
                    // all remaining elements are in self but not other
 
                    s_not_o.push(x);
 
                    s_not_o.extend(s_it);
 
                    break;
 
                }
 
                [Some((sid, sb)), Some((oid, ob))] => {
 
                    if sid < oid {
 
                        // o is missing this element
 
                        s_not_o.push((sid, sb));
 
                        s = s_it.next();
 
                    } else if sid > oid {
 
                        // s is missing this element
 
                        o_not_s.push((oid, ob));
 
                        o = o_it.next();
 
                    } else if sb != ob {
 
                        assert_eq!(sid, oid);
 
                        // both predicates assign the variable but differ on the value
 
                        // No predicate exists which satisfies both!
 
                        return Aur::Nonexistant;
 
                    } else {
 
                        // both predicates assign the variable to the same value
 
                        s = s_it.next();
 
                        o = o_it.next();
 
                    }
 
                }
 
            }
 
        }
 
        // Observed zero inconsistencies. A unified predicate exists...
 
        match [s_not_o.is_empty(), o_not_s.is_empty()] {
 
            [true, true] => Aur::Equivalent,       // ... equivalent to both.
 
            [false, true] => Aur::FormerNotLatter, // ... equivalent to self.
 
            [true, false] => Aur::LatterNotFormer, // ... equivalent to other.
 
            [false, false] => {
 
                // ... which is the union of the predicates' assignments but
 
                //     is equivalent to neither self nor other.
 
                let mut new = self.clone();
 
                for (&id, &b) in o_not_s {
 
                    new.assigned.insert(id, b);
 
                }
 
                Aur::New(new)
 
            }
 
        }
 
    }
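    // Worked examples of the possible outcomes (variables and values hypothetical):
    //     {x=1}       vs {x=1}       => Equivalent
    //     {x=1, y=0}  vs {x=1}       => FormerNotLatter  (self is the superset)
    //     {x=1}       vs {x=1, y=0}  => LatterNotFormer  (other is the superset)
    //     {x=1}       vs {y=0}       => New({x=1, y=0})
    //     {x=1}       vs {x=0}       => Nonexistant      (conflicting assignment)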
 

	
 
    // Compute the union of the assignments of the two given predicates, if it exists.
 
    // It doesn't exist if there is some variable to which the predicates assign different values.
 
    pub(crate) fn union_with(&self, other: &Self) -> Option<Self> {
 
        let mut res = self.clone();
 
        for (&channel_id, &assignment_1) in other.assigned.iter() {
 
            match res.assigned.insert(channel_id, assignment_1) {
 
                Some(assignment_2) if assignment_1 != assignment_2 => return None,
 
                _ => {}
 
            }
 
        }
 
        Some(res)
 
    }
 
    pub(crate) fn query(&self, var: SpecVar) -> Option<SpecVal> {
 
        self.assigned.get(&var).copied()
 
    }
 
}
 

	
 
impl RoundCtx {
 
    // remove an arbitrary buffered message, along with the ID of the getter who receives it
 
    fn getter_pop(&mut self) -> Option<(PortId, SendPayloadMsg)> {
 
        self.payload_inbox.pop()
 
    }
 

	
 
    // buffer a message along with the ID of the getter who receives it
 
    fn getter_push(&mut self, getter: PortId, msg: SendPayloadMsg) {
 
        self.payload_inbox.push((getter, msg));
 
    }
 

	
 
    // buffer a message given the ID of the putter who sent it (it is queued for the putter's peer getter)
 
    fn putter_push(&mut self, cu: &mut impl CuUndecided, putter: PortId, msg: SendPayloadMsg) {
 
        if let Some(getter) = self.ips.port_info.map.get(&putter).unwrap().peer {
 
            log!(cu.logger(), "Putter add (putter:{:?} => getter:{:?})", putter, getter);
 
            self.getter_push(getter, msg);
 
        } else {
 
            log!(cu.logger(), "Putter {:?} has no known peer!", putter);
 
            panic!("Putter {:?} has no known peer!", putter);
 
        }
 
    }
 
}
 

	
 
impl<T: Debug + std::cmp::Ord> Debug for VecSet<T> {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        f.debug_set().entries(self.vec.iter()).finish()
 
    }
 
}
 
impl Debug for Predicate {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        struct Assignment<'a>((&'a SpecVar, &'a SpecVal));
 
        impl Debug for Assignment<'_> {
 
            fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
                write!(f, "{:?}={:?}", (self.0).0, (self.0).1)
 
            }
 
        }
 
        f.debug_set().entries(self.assigned.iter().map(Assignment)).finish()
 
    }
 
}
 
impl IdParts for SpecVar {
 
    fn id_parts(self) -> (ConnectorId, U32Suffix) {
 
        self.0.id_parts()
 
    }
 
}
 
impl Debug for SpecVar {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        let (a, b) = self.id_parts();
 
        write!(f, "v{}_{}", a, b)
 
    }
 
}
 
impl SpecVal {
 
    const FIRING: Self = SpecVal(1);
 
    const SILENT: Self = SpecVal(0);
 
    fn is_firing(self) -> bool {
 
        self == Self::FIRING
 
        // all else treated as SILENT
 
    }
 
    fn iter_domain() -> impl Iterator<Item = Self> {
 
        (0..).map(SpecVal)
 
    }
 
}
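// For example: SpecVal::FIRING.is_firing() == true, while SpecVal(0) and SpecVal(2)
// both report is_firing() == false (any value other than 1 is treated as SILENT).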
 
impl Debug for SpecVal {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        self.0.fmt(f)
 
    }
 
}
 
impl Default for IoByteBuffer {
 
    fn default() -> Self {
 
        let mut byte_vec = Vec::with_capacity(Self::CAPACITY);
 
        unsafe {
 
            // safe! this vector is guaranteed to have sufficient capacity
 
            byte_vec.set_len(Self::CAPACITY);
 
        }
 
        Self { byte_vec }
 
    }
 
}
 
impl IoByteBuffer {
 
    const CAPACITY: usize = u16::MAX as usize + 1000;
 
    fn as_mut_slice(&mut self) -> &mut [u8] {
 
        self.byte_vec.as_mut_slice()
 
    }
 
}
 

	
 
impl Debug for IoByteBuffer {
 
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
 
        write!(f, "IoByteBuffer")
 
    }
 
}
src/runtime_old/setup.rs
Show inline comments
 
file renamed from src/runtime/setup.rs to src/runtime_old/setup.rs
src/runtime_old/tests.rs
Show inline comments
 
file renamed from src/runtime/tests.rs to src/runtime_old/tests.rs
0 comments (0 inline, 0 general)