Changeset - 252d005a21e3
MH - 2021-11-12 12:39:10
contact@maxhenger.nl
initial API implementation
6 files changed with 270 insertions and 218 deletions:
src/runtime2/branch.rs
 
use std::collections::HashMap;
 
use std::ops::{Index, IndexMut};
 

	
 
use crate::protocol::ComponentState;
 
use crate::protocol::eval::{Value, ValueGroup};
 

	
 
use super::port::PortIdLocal;
 

	
 
// To share some logic between the FakeTree and ExecTree implementation
 
trait BranchListItem {
 
    #[inline] fn get_id(&self) -> BranchId;
 
    #[inline] fn set_next_id(&mut self, id: BranchId);
 
    #[inline] fn get_next_id(&self) -> BranchId;
 
    fn get_id(&self) -> BranchId;
 
    fn set_next_id(&mut self, id: BranchId);
 
    fn get_next_id(&self) -> BranchId;
 
}
 

	
 
/// Generic branch ID. A component will always have one branch: the
 
/// non-speculative branch. This branch has ID 0. Hence in a speculative context
 
/// we use this fact to let branch ID 0 denote an invalid ID.
 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 
pub struct BranchId {
 
    pub index: u32
 
}
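A quick sketch of the ID-zero-is-invalid convention described above, as it is used throughout this changeset (the constructors `new`, `new_invalid` and the `is_valid` check live in the `impl BranchId` block below, which this hunk truncates):

    // Sketch only: index 0 doubles as the "invalid" sentinel in speculative code.
    let invalid = BranchId::new_invalid();  // { index: 0 }
    let forked = BranchId::new(3);          // speculative branches always get index > 0
    debug_assert!(!invalid.is_valid() && forked.is_valid());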
 

	
 
impl BranchId {
 
    #[inline]
 
@@ -135,25 +135,25 @@ impl BranchQueue {
 
        }
 
    }
 

	
 
    #[inline]
 
    fn is_empty(&self) -> bool {
 
        debug_assert!(self.first.is_valid() == self.last.is_valid());
 
        return !self.first.is_valid();
 
    }
 
}
 

	
 
const NUM_QUEUES: usize = 3;
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
 
pub(crate) enum QueueKind {
 
    Runnable,
 
    AwaitingMessage,
 
    FinishedSync,
 
}
 

	
 
impl QueueKind {
 
    fn as_index(&self) -> usize {
 
        return match self {
 
            QueueKind::Runnable => 0,
 
            QueueKind::AwaitingMessage => 1,
 
            QueueKind::FinishedSync => 2,
 
@@ -210,34 +210,43 @@ impl ExecTree {
 

	
 
    /// Pushes a branch (ID) into a queue.
 
    pub fn push_into_queue(&mut self, kind: QueueKind, id: BranchId) {
 
        push_into_queue(&mut self.queues[kind.as_index()], &mut self.branches, id);
 
    }
 

	
 
    /// Returns the non-sync branch (TODO: better name?)
 
    pub fn base_branch_mut(&mut self) -> &mut Branch {
 
        debug_assert!(!self.is_in_sync());
 
        return &mut self.branches[0];
 
    }
 

	
 
    /// Returns an iterator over all the elements in the queue of the given
 
    /// kind. One can start the iteration at the branch *after* the provided
 
    /// branch. Just make sure it actually is in the provided queue.
 
    pub fn iter_queue(&self, kind: QueueKind, start_after: Option<BranchId>) -> BranchQueueIter<'_, Branch> {
 
        // Make sure branch is in correct queue while in debug mode
 
        debug_assert!(start_after
 
            .map(|branch_id| self.iter_queue(kind, None).any(|v| v.id == branch_id))
 
            .unwrap_or(true));
 
    /// Returns the branch ID of the first branch in a particular queue.
 
    pub fn get_queue_first(&self, kind: QueueKind) -> Option<BranchId> {
 
        let queue = &self.queues[kind.as_index()];
 
        return iter_queue(queue, &self.branches, start_after);
 
        if queue.first.is_valid() {
 
            return Some(queue.first);
 
        } else {
 
            return None;
 
        }
 
    }
 

	
 
    /// Returns the next branch ID of a branch (assumed to be in a particular
 
    /// queue).
 
    pub fn get_queue_next(&self, branch_id: BranchId) -> Option<BranchId> {
 
        let branch = &self.branches[branch_id.index as usize];
 
        if branch.next_in_queue.is_valid() {
 
            return Some(branch.next_in_queue);
 
        } else {
 
            return None;
 
        }
 
    }
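Together, `get_queue_first` and `get_queue_next` form a cursor over a queue that does not hold a borrow of the tree across the loop body. A minimal usage sketch, mirroring the loop `ConnectorPDL::run` uses below (assuming a `tree: ExecTree` in scope):

    // Walk every branch currently in the finished-sync queue, while remaining
    // free to mutate the tree inside the loop body.
    let mut iter_id = tree.get_queue_first(QueueKind::FinishedSync);
    while let Some(branch_id) = iter_id {
        iter_id = tree.get_queue_next(branch_id);
        // ... handle tree[branch_id] here ...
    }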
 

	
 
    /// Returns an iterator that starts with the provided branch, and then
 
    /// continues to visit all of the branch's parents.
 
    pub fn iter_parents(&self, branch_id: BranchId) -> BranchParentIter {
 
        return BranchParentIter{
 
            branches: self.branches.as_slice(),
 
            index: branch_id.index as usize,
 
        }
 
    }
 

	
 
    // --- Preparing and finishing a speculative round
 
@@ -261,25 +270,24 @@ impl ExecTree {
 
        let parent_branch = &self[parent_branch_id];
 
        let new_branch = Branch::new_sync(self.branches.len() as u32, parent_branch);
 
        let new_branch_id = new_branch.id;
 
        self.branches.push(new_branch);
 

	
 
        return new_branch_id;
 
    }
 

	
 
    /// Collapses the speculative execution tree back into a deterministic one,
 
    /// using the provided branch as the final sync result.
 
    pub fn end_sync(&mut self, branch_id: BranchId) {
 
        debug_assert!(self.is_in_sync());
 
        debug_assert!(self.iter_queue(QueueKind::FinishedSync, None).any(|v| v.id == branch_id));
 

	
 
        // Swap indicated branch into the first position
 
        self.branches.swap(0, branch_id.index as usize);
 
        self.branches.truncate(1);
 

	
 
        // Reset all values to non-sync defaults
 
        let branch = &mut self.branches[0];
 
        branch.id = BranchId::new_invalid();
 
        branch.parent_id = BranchId::new_invalid();
 
        branch.sync_state = SpeculativeState::RunningNonSync;
 
        debug_assert!(!branch.awaiting_port.is_valid());
 
        branch.next_in_queue = BranchId::new_invalid();
 
@@ -300,45 +308,24 @@ impl Index<BranchId> for ExecTree {
 
        debug_assert!(index.is_valid());
 
        return &self.branches[index.index as usize];
 
    }
 
}
 

	
 
impl IndexMut<BranchId> for ExecTree {
 
    fn index_mut(&mut self, index: BranchId) -> &mut Self::Output {
 
        debug_assert!(index.is_valid());
 
        return &mut self.branches[index.index as usize];
 
    }
 
}
 

	
 
/// Iterator over branches in a `ExecTree` queue.
 
pub(crate) struct BranchQueueIter<'a, B: BranchListItem> {
 
    branches: &'a [B],
 
    index: usize,
 
}
 

	
 
impl<'a, B: BranchListItem> Iterator for BranchQueueIter<'a, B> {
 
    type Item = &'a B;
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        if self.index == 0 {
 
            // i.e. the invalid branch index
 
            return None;
 
        }
 

	
 
        let branch = &self.branches[self.index];
 
        self.index = branch.get_next_id().index as usize;
 
        return Some(branch);
 
    }
 
}
 

	
 
/// Iterator over the parents of an `ExecTree` branch.
 
pub(crate) struct BranchParentIter<'a> {
 
    branches: &'a [Branch],
 
    index: usize,
 
}
 

	
 
impl<'a> Iterator for BranchParentIter<'a> {
 
    type Item = &'a Branch;
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        if self.index == 0 {
 
            return None;
 
@@ -365,28 +352,28 @@ pub(crate) struct FakeBranch {
 
    pub awaiting_port: PortIdLocal,
 
    pub next_in_queue: BranchId,
 
    pub inbox: HashMap<PortIdLocal, ValueGroup>,
 
}
 

	
 
impl BranchListItem for FakeBranch {
 
    #[inline] fn get_id(&self) -> BranchId { return self.id; }
 
    #[inline] fn set_next_id(&mut self, id: BranchId) { self.next_in_queue = id; }
 
    #[inline] fn get_next_id(&self) -> BranchId { return self.next_in_queue; }
 
}
 

	
 
impl FakeBranch {
 
    fn new_root(index: u32) -> FakeBranch {
 
        debug_assert!(index == 0);
 
    fn new_root(_index: u32) -> FakeBranch {
 
        debug_assert!(_index == 1);
 
        return FakeBranch{
 
            id: BranchId::new_invalid(),
 
            id: BranchId::new(1),
 
            parent_id: BranchId::new_invalid(),
 
            sync_state: SpeculativeState::RunningInSync,
 
            awaiting_port: PortIdLocal::new_invalid(),
 
            next_in_queue: BranchId::new_invalid(),
 
            inbox: HashMap::new(),
 
        }
 
    }
 

	
 
    fn new_branching(index: u32, parent_branch: &FakeBranch) -> FakeBranch {
 
        return FakeBranch {
 
            id: BranchId::new(index),
 
            parent_id: parent_branch.id,
 
@@ -406,83 +393,107 @@ impl FakeBranch {
 
}
 

	
 
/// A little helper for native components that don't have a set of branches that

/// are actually executing code, but that still have to manage the idea of

/// branches because they perform the equivalent of a branching `get` call.
 
pub(crate) struct FakeTree {
 
    pub branches: Vec<FakeBranch>,
 
    queues: [BranchQueue; NUM_QUEUES],
 
}
 

	
 
impl FakeTree {
 
    pub fn new() -> Self {
 
        // TODO: Don't like this? Cause is that now we don't have a non-sync
 
        //  branch. But we assumed BranchId=0 means the branch is invalid. We
 
        //  can do the rusty Option<BranchId> stuff. But we still need a token
 
        //  value within the protocol to signify no-branch-id. Maybe the high
 
        //  bit? Branches are crazy expensive, no-one is going to have 2^32
 
        //  branches anyway. 2^31 isn't too bad.
 
        return Self {
 
            branches: Vec::new(),
 
            branches: vec![FakeBranch{
 
                id: BranchId::new_invalid(),
 
                parent_id: BranchId::new_invalid(),
 
                sync_state: SpeculativeState::RunningNonSync,
 
                awaiting_port: PortIdLocal::new_invalid(),
 
                next_in_queue: BranchId::new_invalid(),
 
                inbox: HashMap::new(),
 
            }],
 
            queues: [BranchQueue::new(); 3]
 
        }
 
    }
 

	
 
    fn is_in_sync(&self) -> bool {
 
        return self.branches.len() > 1;
 
    }
 

	
 
    pub fn queue_is_empty(&self, kind: QueueKind) -> bool {
 
        return self.queues[kind.as_index()].is_empty();
 
    }
 

	
 
    pub fn pop_from_queue(&mut self, kind: QueueKind) -> Option<BranchId> {
 
        debug_assert_ne!(kind, QueueKind::FinishedSync);
 
        return pop_from_queue(&mut self.queues[kind.as_index()], &mut self.branches);
 
    }
 

	
 
    pub fn push_into_queue(&mut self, kind: QueueKind, id: BranchId) {
 
        push_into_queue(&mut self.queues[kind.as_index()], &mut self.branches, id);
 
    }
 

	
 
    pub fn iter_queue(&self, kind: QueueKind, start_after: Option<BranchId>) -> BranchQueueIter<'_, FakeBranch> {
 
        debug_assert!(start_after
 
            .map(|branch_id| self.iter_queue(kind, None).any(|v| v.id == branch_id))
 
            .unwrap_or(true)
 
        );
 
        return iter_queue(&self.queues[kind.as_index()], &self.branches, start_after);
 
    pub fn get_queue_first(&self, kind: QueueKind) -> Option<BranchId> {
 
        let queue = &self.queues[kind.as_index()];
 
        if queue.first.is_valid() {
 
            return Some(queue.first)
 
        } else {
 
            return None;
 
        }
 
    }
 

	
 
    pub fn get_queue_next(&self, branch_id: BranchId) -> Option<BranchId> {
 
        let branch = &self.branches[branch_id.index as usize];
 
        if branch.next_in_queue.is_valid() {
 
            return Some(branch.next_in_queue);
 
        } else {
 
            return None;
 
        }
 
    }
 

	
 
    pub fn start_sync(&mut self) -> BranchId {
 
        debug_assert!(!self.is_in_sync());
 

	
 
        // Create the first branch
 
        let sync_branch = FakeBranch::new_root(1);
 
        let sync_branch_id = sync_branch.id;
 
        self.branches.push(sync_branch);
 

	
 
        return sync_branch_id;
 
    }
 

	
 
    pub fn fork_branch(&mut self, parent_branch_id: BranchId) -> BranchId {
 
        debug_assert!(self.is_in_sync());
 
        let parent_branch = &self[parent_branch_id];
 
        let new_branch = FakeBranch::new_branching(self.branches.len() as u32, parent_branch);
 
        let new_branch_id = new_branch.id;
 
        self.branches.push(new_branch);
 

	
 
        return new_branch_id;
 
    }
 

	
 
    pub fn end_sync(&mut self, branch_id: BranchId) -> FakeBranch {
 
        debug_assert!(branch_id.is_valid());
 
        debug_assert!(self.is_in_sync());
 
        debug_assert!(self.iter_queue(QueueKind::FinishedSync, None).any(|v| v.id == branch_id));
 

	
 
        // Take out the succeeding branch, then just clear all fake branches.
 
        let mut iter = self.branches.drain(branch_id.index..);
 
        let result = iter.next().unwrap();
 
        self.branches.swap(1, branch_id.index as usize);
 
        self.branches.truncate(2);
 
        let result = self.branches.pop().unwrap();
 

	
 
        for queue_index in 0..NUM_QUEUES {
 
            self.queues[queue_index] = BranchQueue::new();
 
        }
 

	
 
        return result;
 
    }
 
}
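For reference, a sketch of the intended `FakeTree` lifecycle as driven by the API connector in `native.rs` below; the per-branch `put`/`get` handling in between is elided:

    // Hypothetical round, mirroring ConnectorApplication::run_in_*_mode.
    let mut tree = FakeTree::new();
    let root_id = tree.start_sync();                    // enter speculative mode
    tree.push_into_queue(QueueKind::Runnable, root_id); // schedule the root branch
    while let Some(branch_id) = tree.pop_from_queue(QueueKind::Runnable) {
        // ... execute the branch's next sync action; once it has none left:
        tree.push_into_queue(QueueKind::FinishedSync, branch_id);
    }
    // Once consensus picks a solution:
    // let solution_branch = tree.end_sync(solution_branch_id);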
 

	
 
impl Index<BranchId> for FakeTree {
 
    type Output = FakeBranch;
 

	
 
@@ -513,32 +524,17 @@ fn pop_from_queue<B: BranchListItem>(queue: &mut BranchQueue, branches: &mut [B]
 
        }
 

	
 
        return Some(first_branch.get_id());
 
    }
 
}
 

	
 
fn push_into_queue<B: BranchListItem>(queue: &mut BranchQueue, branches: &mut [B], branch_id: BranchId) {
 
    debug_assert!(!branches[branch_id.index as usize].get_next_id().is_valid());
 
    if queue.is_empty() {
 
        queue.first = branch_id;
 
        queue.last = branch_id;
 
    } else {
 
        let last_branch = &mut branches[queue.last as usize];
 
        let last_branch = &mut branches[queue.last.index as usize];
 
        last_branch.set_next_id(branch_id);
 
        queue.last = branch_id;
 
    }
 
}
 

	
 
fn iter_queue<'a, B: BranchListItem>(queue: &BranchQueue, branches: &'a [B], start_after: Option<BranchId>) -> BranchQueueIter<'a, B> {
 
    let index = match start_after {
 
        Some(branch_id) => {
 
            // Assuming caller is correct and that the branch is in the queue
 
            let first_branch = &branches[branch_id.index as usize];
 
            first_branch.get_next_id().index as usize
 
        },
 
        None => {
 
            queue.first.index as usize
 
        }
 
    };
 

	
 
    return BranchQueueIter{ branches, index };
 
}
 
\ No newline at end of file
src/runtime2/connector.rs
 
@@ -57,24 +57,25 @@ impl ConnectorPublic {
 

	
 
#[derive(Eq, PartialEq)]
 
pub(crate) enum ConnectorScheduling {
 
    Immediate,      // Run again, immediately
 
    Later,          // Schedule for running, at some later point in time
 
    NotNow,         // Do not reschedule for running
 
    Exit,           // Connector has exited
 
}
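The variants map onto scheduler decisions roughly as follows; this is a sketch only, the real dispatch lives in `scheduler.rs`, which is not part of this changeset:

    // Hypothetical handling of a connector's run() result by the scheduler.
    match connector.run(sched_ctx, comp_ctx) {
        ConnectorScheduling::Immediate => { /* run this connector again right away */ },
        ConnectorScheduling::Later => { /* push it back onto the work queue */ },
        ConnectorScheduling::NotNow => { /* park it until a message wakes it up */ },
        ConnectorScheduling::Exit => { /* tear the connector down */ },
    }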
 

	
 
pub(crate) struct ConnectorPDL {
 
    tree: ExecTree,
 
    consensus: Consensus,
 
    last_finished_handled: Option<BranchId>,
 
}
 

	
 
struct ConnectorRunContext<'a> {
 
    branch_id: BranchId,
 
    consensus: &'a Consensus,
 
    received: &'a HashMap<PortIdLocal, ValueGroup>,
 
    scheduler: SchedulerCtx<'a>,
 
    prepared_channel: Option<(Value, Value)>,
 
}
 

	
 
impl<'a> RunContext for ConnectorRunContext<'a>{
 
    fn did_put(&mut self, port: PortId) -> bool {
 
@@ -97,76 +98,95 @@ impl<'a> RunContext for ConnectorRunContext<'a>{
 
        return annotation.expected_firing.map(|v| Value::Bool(v));
 
    }
 

	
 
    fn get_channel(&mut self) -> Option<(Value, Value)> {
 
        return self.prepared_channel.take();
 
    }
 
}
 

	
 
impl Connector for ConnectorPDL {
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        self.handle_new_messages(comp_ctx);
 
        if self.tree.is_in_sync() {
 
            // Run in sync mode
 
            let scheduling = self.run_in_sync_mode(sched_ctx, comp_ctx);
 
            if let Some(solution_branch_id) = self.consensus.handle_new_finished_sync_branches(&self.tree, comp_ctx) {
 
                self.collapse_sync_to_solution_branch(solution_branch_id, comp_ctx);
 
                return ConnectorScheduling::Immediate;
 
            } else {
 
                return scheduling
 

	
 
            // Handle any new finished branches
 
            let mut iter_id = self.last_finished_handled.or(self.tree.get_queue_first(QueueKind::FinishedSync));
 
            while let Some(branch_id) = iter_id {
 
                iter_id = self.tree.get_queue_next(branch_id);
 
                self.last_finished_handled = Some(branch_id);
 

	
 

	
 
                if let Some(solution_branch_id) = self.consensus.handle_new_finished_sync_branch(branch_id, comp_ctx) {
 
                    // Actually found a solution
 
                    self.collapse_sync_to_solution_branch(solution_branch_id, comp_ctx);
 
                    return ConnectorScheduling::Immediate;
 
                }
 

	
 
                self.last_finished_handled = Some(branch_id);
 
            }
 

	
 
            return scheduling;
 
        } else {
 
            let scheduling = self.run_in_deterministic_mode(sched_ctx, comp_ctx);
 
            return scheduling;
 
        }
 
    }
 
}
 

	
 
impl ConnectorPDL {
 
    pub fn new(initial: ComponentState) -> Self {
 
        Self{
 
            tree: ExecTree::new(initial),
 
            consensus: Consensus::new(),
 
            last_finished_handled: None,
 
        }
 
    }
 

	
 
    // --- Handling messages
 

	
 
    pub fn handle_new_messages(&mut self, ctx: &mut ComponentCtx) {
 
        while let Some(message) = ctx.read_next_message() {
 
            match message {
 
                Message::Data(message) => self.handle_new_data_message(message, ctx),
 
                Message::Sync(message) => self.handle_new_sync_message(message, ctx),
 
                Message::Control(_) => unreachable!("control message in component"),
 
            }
 
        }
 
    }
 

	
 
    pub fn handle_new_data_message(&mut self, message: DataMessage, ctx: &mut ComponentCtx) {
 
        // Go through all branches that are awaiting new messages and see if
 
        // there is one that can receive this message.
 
        debug_assert!(ctx.workspace_branches.is_empty());
 
        let mut branches = Vec::new(); // TODO: @Remove
 
        if !self.consensus.handle_new_data_message(&self.tree, &message, ctx, &mut branches) {
 
        if !self.consensus.handle_new_data_message(&message, ctx) {
 
            // Old message, so drop it
 
            return;
 
        }
 

	
 
        for branch_id in branches.drain(..) {
 
        let mut iter_id = self.tree.get_queue_first(QueueKind::AwaitingMessage);
 
        while let Some(branch_id) = iter_id {
 
            iter_id = self.tree.get_queue_next(branch_id);
 

	
 
            let branch = &self.tree[branch_id];
 
            if branch.awaiting_port != message.data_header.target_port { continue; }
 
            if !self.consensus.branch_can_receive(branch_id, &message) { continue; }
 

	
 
            // This branch can receive, so fork and give it the message
 
            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
            let receiving_branch = &mut self.tree[receiving_branch_id];
 

	
 
            receiving_branch.insert_message(message.data_header.target_port, message.content.as_message().unwrap().clone());
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message.sync_header, &message.data_header, &message.content);
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message);
 

	
 
            // And prepare the branch for running
 
            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 
        }
 
    }
 

	
 
    pub fn handle_new_sync_message(&mut self, message: SyncMessage, ctx: &mut ComponentCtx) {
 
        if let Some(solution_branch_id) = self.consensus.handle_new_sync_message(message, ctx) {
 
            self.collapse_sync_to_solution_branch(solution_branch_id, ctx);
 
        }
 
    }
 

	
 
@@ -232,34 +252,34 @@ impl ConnectorPDL {
 
                let consistency = self.consensus.notify_of_speculative_mapping(branch_id, port_id, true);
 
                if consistency == Consistency::Valid {
 
                    // `get()` is valid, so mark the branch as awaiting a message
 
                    branch.sync_state = SpeculativeState::HaltedAtBranchPoint;
 
                    branch.awaiting_port = port_id;
 
                    self.tree.push_into_queue(QueueKind::AwaitingMessage, branch_id);
 

	
 
                    // Note: we only know that a branch is waiting on a message when
 
                    // it reaches the `get` call. But we might have already received
 
                    // a message that targets this branch, so check now.
 
                    let mut any_message_received = false;
 
                    for message in comp_ctx.get_read_data_messages(port_id) {
 
                        if self.consensus.branch_can_receive(branch_id, &message.sync_header, &message.data_header, &message.content) {
 
                        if self.consensus.branch_can_receive(branch_id, &message) {
 
                            // This branch can receive the message, so we do the
 
                            // fork-and-receive dance
 
                            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
                            let branch = &mut self.tree[receiving_branch_id];
 

	
 
                            branch.insert_message(port_id, message.content.as_message().unwrap().clone());
 

	
 
                            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
                            self.consensus.notify_of_received_message(receiving_branch_id, &message.sync_header, &message.data_header, &message.content);
 
                            self.consensus.notify_of_received_message(receiving_branch_id, &message);
 
                            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 

	
 
                            any_message_received = true;
 
                        }
 
                    }
 

	
 
                    if any_message_received {
 
                        return ConnectorScheduling::Immediate;
 
                    }
 
                } else {
 
                    branch.sync_state = SpeculativeState::Inconsistent;
 
                }
 
@@ -318,24 +338,25 @@ impl ConnectorPDL {
 
        };
 
        let run_result = branch.code_state.run(&mut run_context, &sched_ctx.runtime.protocol_description);
 

	
 
        match run_result {
 
            RunResult::ComponentTerminated => {
 
                branch.sync_state = SpeculativeState::Finished;
 

	
 
                return ConnectorScheduling::Exit;
 
            },
 
            RunResult::ComponentAtSyncStart => {
 
                comp_ctx.notify_sync_start();
 
                let sync_branch_id = self.tree.start_sync();
 
                debug_assert!(self.last_finished_handled.is_none());
 
                self.consensus.start_sync(comp_ctx);
 
                self.consensus.notify_of_new_branch(BranchId::new_invalid(), sync_branch_id);
 
                self.tree.push_into_queue(QueueKind::Runnable, sync_branch_id);
 

	
 
                return ConnectorScheduling::Immediate;
 
            },
 
            RunResult::NewComponent(definition_id, monomorph_idx, arguments) => {
 
                // Note: we're relinquishing ownership of ports. But because
 
                // we are in non-sync mode the scheduler will handle and check
 
                // port ownership transfer.
 
                debug_assert!(comp_ctx.workspace_ports.is_empty());
 
                find_ports_in_value_group(&arguments, &mut comp_ctx.workspace_ports);
 
@@ -372,14 +393,15 @@ impl ConnectorPDL {
 

	
 
    pub fn collapse_sync_to_solution_branch(&mut self, solution_branch_id: BranchId, ctx: &mut ComponentCtx) {
 
        let mut fake_vec = Vec::new();
 
        self.tree.end_sync(solution_branch_id);
 
        self.consensus.end_sync(solution_branch_id, &mut fake_vec);
 

	
 
        for port in fake_vec {
 
            // TODO: Handle sent/received ports
 
            debug_assert!(ctx.get_port_by_id(port).is_some());
 
        }
 

	
 
        ctx.notify_sync_end(&[]);
 
        self.last_finished_handled = None;
 
    }
 
}
 
\ No newline at end of file
src/runtime2/consensus.rs
 
use crate::collections::VecSet;
 

	
 
use crate::protocol::eval::ValueGroup;
 
use crate::runtime2::branch::BranchQueueIter;
 

	
 
use super::branch::{BranchId, ExecTree, QueueKind};
 
use super::ConnectorId;
 
use super::branch::BranchId;
 
use super::port::{ChannelId, PortIdLocal};
 
use super::inbox::{
 
    Message, PortAnnotation,
 
    DataMessage, DataContent, DataHeader,
 
    SyncMessage, SyncContent, SyncHeader,
 
};
 
use super::scheduler::ComponentCtx;
 

	
 
struct BranchAnnotation {
 
    port_mapping: Vec<PortAnnotation>,
 
}
 

	
 
@@ -40,53 +39,52 @@ struct Peer {
 
    expected_sync_round: u32,
 
}
 

	
 
/// The consensus algorithm. Currently only implemented to find the component
 
/// with the highest ID within the sync region and letting it handle all the
 
/// local solutions.
 
///
 
/// The type itself serves as an experiment to see how code should be organized.
 
// TODO: Flatten all datastructures
 
// TODO: Have a "branch+port position hint" in case multiple operations are
 
//  performed on the same port to prevent repeated lookups
 
// TODO: A lot of stuff should be batched. Like checking all the sync headers
 
//  and sending "I have a higher ID" messages.
 
//  and sending "I have a higher ID" messages. Should reduce locking by quite a
 
//  bit.
 
pub(crate) struct Consensus {
 
    // --- State that is cleared after each round
 
    // Local component's state
 
    highest_connector_id: ConnectorId,
 
    branch_annotations: Vec<BranchAnnotation>,
 
    last_finished_handled: Option<BranchId>,
 
    // Gathered state from communication
 
    encountered_ports: VecSet<PortIdLocal>, // to determine if we should send "port remains silent" messages.
 
    solution_combiner: SolutionCombiner,
 
    // --- Persistent state
 
    peers: Vec<Peer>,
 
    sync_round: u32,
 
    // --- Workspaces
 
    workspace_ports: Vec<PortIdLocal>,
 
}
 

	
 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 
pub(crate) enum Consistency {
 
    Valid,
 
    Inconsistent,
 
}
 

	
 
impl Consensus {
 
    pub fn new() -> Self {
 
        return Self {
 
            highest_connector_id: ConnectorId::new_invalid(),
 
            branch_annotations: Vec::new(),
 
            last_finished_handled: None,
 
            encountered_ports: VecSet::new(),
 
            solution_combiner: SolutionCombiner::new(),
 
            peers: Vec::new(),
 
            sync_round: 0,
 
            workspace_ports: Vec::new(),
 
        }
 
    }
 

	
 
    // --- Controlling sync round and branches
 

	
 
    /// Returns whether the consensus algorithm is running in sync mode
 
    pub fn is_in_sync(&self) -> bool {
 
@@ -97,25 +95,24 @@ impl Consensus {
 
    pub fn get_annotation(&self, branch_id: BranchId, port_id: PortIdLocal) -> &PortAnnotation {
 
        let branch = &self.branch_annotations[branch_id.index as usize];
 
        let port = branch.port_mapping.iter().find(|v| v.port_id == port_id).unwrap();
 
        return port;
 
    }
 

	
 
    /// Sets up the consensus algorithm for a new synchronous round. The
 
    /// provided ports should be the ports the component owns at the start of
 
    /// the sync round.
 
    pub fn start_sync(&mut self, ctx: &ComponentCtx) {
 
        debug_assert!(!self.highest_connector_id.is_valid());
 
        debug_assert!(self.branch_annotations.is_empty());
 
        debug_assert!(self.last_finished_handled.is_none());
 
        debug_assert!(self.solution_combiner.local.is_empty());
 

	
 
        // We'll use the first "branch" (the non-sync one) to store our ports,
 
        // so they can be cloned whenever we create a new branch.
 
        self.branch_annotations.push(BranchAnnotation{
 
            port_mapping: ctx.get_ports().iter()
 
                .map(|v| PortAnnotation{
 
                    port_id: v.self_id,
 
                    registered_id: None,
 
                    expected_firing: None,
 
                })
 
                .collect(),
 
@@ -179,101 +176,91 @@ impl Consensus {
 
                            return Consistency::Valid;
 
                        } else {
 
                            return Consistency::Inconsistent;
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        unreachable!("notify_of_speculative_mapping called with unowned port");
 
    }
 

	
 
    /// Generates sync messages for any branches that are at the end of the
 
    /// sync block. To find these branches, they should've been put in the
 
    /// "finished" queue in the execution tree.
 
    pub fn handle_new_finished_sync_branches(&mut self, tree: &ExecTree, ctx: &mut ComponentCtx) -> Option<BranchId> {
 
        debug_assert!(self.is_in_sync());
 

	
 
        let mut last_branch_id = self.last_finished_handled;
 
        for branch in tree.iter_queue(QueueKind::FinishedSync, last_branch_id) {
 
            // Turn the port mapping into a local solution
 
            let source_mapping = &self.branch_annotations[branch.id.index as usize].port_mapping;
 
            let mut target_mapping = Vec::with_capacity(source_mapping.len());
 

	
 
            for port in source_mapping {
 
                // Note: if the port is silent, and we've never communicated
 
                // over the port, then we need to do so now, to let the peer
 
                // component know about our sync leader state.
 
                let port_desc = ctx.get_port_by_id(port.port_id).unwrap();
 
                let peer_port_id = port_desc.peer_id;
 
                let channel_id = port_desc.channel_id;
 

	
 
                if !self.encountered_ports.contains(&port.port_id) {
 
                    ctx.submit_message(Message::Data(DataMessage {
 
                        sync_header: SyncHeader{
 
                            sending_component_id: ctx.id,
 
                            highest_component_id: self.highest_connector_id,
 
                            sync_round: self.sync_round
 
                        },
 
                        data_header: DataHeader{
 
                            expected_mapping: source_mapping.clone(),
 
                            sending_port: port.port_id,
 
                            target_port: peer_port_id,
 
                            new_mapping: BranchId::new_invalid(),
 
                        },
 
                        content: DataContent::SilentPortNotification,
 
                    }));
 
                    self.encountered_ports.push(port.port_id);
 
                }
 

	
 
                target_mapping.push((
 
                    channel_id,
 
                    port.registered_id.unwrap_or(BranchId::new_invalid())
 
                ));
 
            }
 

	
 
            let local_solution = LocalSolution{
 
                component: ctx.id,
 
                final_branch_id: branch.id,
 
                port_mapping: target_mapping,
 
            };
 
            let solution_branch = self.send_or_store_local_solution(local_solution, ctx);
 
            if solution_branch.is_some() {
 
                // No need to continue iterating, we've found the solution
 
                return solution_branch;
 
    /// Generates a new local solution from a finished branch. If the component
 
    /// is not the leader of the sync region then the solution will be sent to the
 
    /// appropriate component. If it is the leader then there is a chance that
 
    /// this solution completes a global solution. In that case the solution
 
    /// branch ID will be returned.
 
    pub(crate) fn handle_new_finished_sync_branch(&mut self, branch_id: BranchId, ctx: &mut ComponentCtx) -> Option<BranchId> {
 
        // Turn the port mapping into a local solution
 
        let source_mapping = &self.branch_annotations[branch_id.index as usize].port_mapping;
 
        let mut target_mapping = Vec::with_capacity(source_mapping.len());
 

	
 
        for port in source_mapping {
 
            // Note: if the port is silent, and we've never communicated
 
            // over the port, then we need to do so now, to let the peer
 
            // component know about our sync leader state.
 
            let port_desc = ctx.get_port_by_id(port.port_id).unwrap();
 
            let peer_port_id = port_desc.peer_id;
 
            let channel_id = port_desc.channel_id;
 

	
 
            if !self.encountered_ports.contains(&port.port_id) {
 
                ctx.submit_message(Message::Data(DataMessage {
 
                    sync_header: SyncHeader{
 
                        sending_component_id: ctx.id,
 
                        highest_component_id: self.highest_connector_id,
 
                        sync_round: self.sync_round
 
                    },
 
                    data_header: DataHeader{
 
                        expected_mapping: source_mapping.clone(),
 
                        sending_port: port.port_id,
 
                        target_port: peer_port_id,
 
                        new_mapping: BranchId::new_invalid(),
 
                    },
 
                    content: DataContent::SilentPortNotification,
 
                }));
 
                self.encountered_ports.push(port.port_id);
 
            }
 

	
 
            last_branch_id = Some(branch.id);
 
            target_mapping.push((
 
                channel_id,
 
                port.registered_id.unwrap_or(BranchId::new_invalid())
 
            ));
 
        }
 

	
 
        self.last_finished_handled = last_branch_id;
 
        return None;
 
        let local_solution = LocalSolution{
 
            component: ctx.id,
 
            final_branch_id: branch_id,
 
            port_mapping: target_mapping,
 
        };
 
        let solution_branch = self.send_or_store_local_solution(local_solution, ctx);
 
        return solution_branch;
 
    }
 

	
 
    /// Notifies the consensus algorithm about the chosen branch to commit to
 
    /// memory.
 
    pub fn end_sync(&mut self, branch_id: BranchId, final_ports: &mut Vec<PortIdLocal>) {
 
        debug_assert!(self.is_in_sync());
 

	
 
        // TODO: Handle sending and receiving ports
 
        // Set final ports
 
        final_ports.clear();
 
        let branch = &self.branch_annotations[branch_id.index as usize];
 
        for port in &branch.port_mapping {
 
            final_ports.push(port.port_id);
 
        }
 

	
 
        // Clear out internal storage to defaults
 
        self.highest_connector_id = ConnectorId::new_invalid();
 
        self.branch_annotations.clear();
 
        self.last_finished_handled = None;
 
        self.encountered_ports.clear();
 
        self.solution_combiner.clear();
 

	
 
        self.sync_round += 1;
 

	
 
        for peer in self.peers.iter_mut() {
 
            peer.encountered_this_round = false;
 
            peer.expected_sync_round += 1;
 
        }
 
    }
 

	
 
    // --- Handling messages
 
@@ -316,33 +303,28 @@ impl Consensus {
 
        for mapping in &mut branch.port_mapping {
 
            if mapping.port_id == source_port_id {
 
                mapping.expected_firing = Some(true);
 
                mapping.registered_id = Some(branch_id);
 
            }
 
        }
 

	
 
        self.encountered_ports.push(source_port_id);
 

	
 
        return (self.create_sync_header(ctx), data_header);
 
    }
 

	
 
    /// Handles a new data message by handling the data and sync header, and
 
    /// checking which *existing* branches *can* receive the message. So two
 
    /// cautionary notes:
 
    /// 1. A future branch might also be able to receive this message, see the
 
    ///     `branch_can_receive` function.
 
    /// 2. We return the branches that *can* receive the message, you still
 
    ///     have to explicitly call `notify_of_received_message`.
 
    pub fn handle_new_data_message(&mut self, potential_receivers: BranchQueueIter<'_, >, message: &DataMessage, ctx: &mut ComponentCtx, target_ids: &mut Vec<BranchId>) -> bool {
 
        self.handle_received_data_header(exec_tree, &message.sync_header, &message.data_header, &message.content, target_ids);
 
    /// Handles a new data message by handling the sync header. The caller is
 
    /// responsible for checking for branches that might be able to receive
 
    /// the message.
 
    pub fn handle_new_data_message(&mut self, message: &DataMessage, ctx: &mut ComponentCtx) -> bool {
 
        return self.handle_received_sync_header(&message.sync_header, ctx)
 
    }
 

	
 
    /// Handles a new sync message by handling the sync header and the contents
 
    /// of the message. Returns `Some` with the branch ID of the global solution
 
    /// if the sync solution has been found.
 
    pub fn handle_new_sync_message(&mut self, message: SyncMessage, ctx: &mut ComponentCtx) -> Option<BranchId> {
 
        if !self.handle_received_sync_header(&message.sync_header, ctx) {
 
            return None;
 
        }
 

	
 
        // And handle the contents
 
@@ -358,99 +340,84 @@ impl Consensus {
 
                return self.send_or_store_local_solution(solution, ctx);
 
            },
 
            SyncContent::GlobalSolution(solution) => {
 
                // Take branch of interest and return it.
 
                let (_, branch_id) = solution.component_branches.iter()
 
                    .find(|(connector_id, _)| *connector_id == ctx.id)
 
                    .unwrap();
 
                return Some(*branch_id);
 
            }
 
        }
 
    }
 

	
 
    pub fn notify_of_received_message(&mut self, branch_id: BranchId, sync_header: &SyncHeader, data_header: &DataHeader, content: &DataContent) {
 
        debug_assert!(self.branch_can_receive(branch_id, sync_header, data_header, content));
 
    pub fn notify_of_received_message(&mut self, branch_id: BranchId, message: &DataMessage) {
 
        debug_assert!(self.branch_can_receive(branch_id, message));
 

	
 
        let branch = &mut self.branch_annotations[branch_id.index as usize];
 
        for mapping in &mut branch.port_mapping {
 
            if mapping.port_id == data_header.target_port {
 
            if mapping.port_id == message.data_header.target_port {
 
                // Found the port in which the message should be inserted
 
                mapping.registered_id = Some(data_header.new_mapping);
 
                mapping.registered_id = Some(message.data_header.new_mapping);
 

	
 
                // Check for sent ports
 
                debug_assert!(self.workspace_ports.is_empty());
 
                find_ports_in_value_group(content.as_message().unwrap(), &mut self.workspace_ports);
 
                find_ports_in_value_group(message.content.as_message().unwrap(), &mut self.workspace_ports);
 
                if !self.workspace_ports.is_empty() {
 
                    todo!("handle received ports");
 
                    self.workspace_ports.clear();
 
                }
 

	
 
                return;
 
            }
 
        }
 

	
 
        // If here, then the branch didn't actually own the port? Means the
 
        // caller made a mistake
 
        unreachable!("incorrect notify_of_received_message");
 
    }
 

	
 
    /// Matches the mapping between the branch and the data message. If they
 
    /// match then the branch can receive the message.
 
    pub fn branch_can_receive(&self, branch_id: BranchId, sync_header: &SyncHeader, data_header: &DataHeader, content: &DataContent) -> bool {
 
        if let Some(peer) = self.peers.iter().find(|v| v.id == sync_header.sending_component_id) {
 
            if sync_header.sync_round < peer.expected_sync_round {
 
    pub fn branch_can_receive(&self, branch_id: BranchId, message: &DataMessage) -> bool {
 
        if let Some(peer) = self.peers.iter().find(|v| v.id == message.sync_header.sending_component_id) {
 
            if message.sync_header.sync_round < peer.expected_sync_round {
 
                return false;
 
            }
 
        }
 

	
 
        if let DataContent::SilentPortNotification = content {
 
        if let DataContent::SilentPortNotification = message.content {
 
            // No port can receive a "silent" notification.
 
            return false;
 
        }
 

	
 
        let annotation = &self.branch_annotations[branch_id.index as usize];
 
        for expected in &data_header.expected_mapping {
 
        for expected in &message.data_header.expected_mapping {
 
            // If we own the port, then we have an entry in the
 
            // annotation, check if the current mapping matches
 
            for current in &annotation.port_mapping {
 
                if expected.port_id == current.port_id {
 
                    if expected.registered_id != current.registered_id {
 
                        // IDs do not match, we cannot receive the
 
                        // message in this branch
 
                        return false;
 
                    }
 
                }
 
            }
 
        }
 

	
 
        return true;
 
    }
 

	
 
    // --- Internal helpers
 

	
 
    /// Checks data header and consults the stored port mapping and the
 
    /// execution tree to see which branches may receive the data message's
 
    /// contents.
 
    fn handle_received_data_header(&self, exec_tree: &ExecTree, sync_header: &SyncHeader, data_header: &DataHeader, content: &DataContent, target_ids: &mut Vec<BranchId>) {
 
        for branch in exec_tree.iter_queue(QueueKind::AwaitingMessage, None) {
 
            if branch.awaiting_port == data_header.target_port {
 
                // Found a branch awaiting the message, but we need to make sure
 
                // the mapping is correct
 
                if self.branch_can_receive(branch.id, sync_header, data_header, content) {
 
                    target_ids.push(branch.id);
 
                }
 
            }
 
        }
 
    }
 

	
 
    fn handle_received_sync_header(&mut self, sync_header: &SyncHeader, ctx: &mut ComponentCtx) -> bool {
 
        debug_assert!(sync_header.sending_component_id != ctx.id); // not sending to ourselves
 
        if !self.handle_peer(sync_header) {
 
            // We can drop this package
 
            return false;
 
        }
 

	
 
        if sync_header.highest_component_id > self.highest_connector_id {
 
            // Sender has higher component ID. So should be the target of our
 
            // messages. We should also let all of our peers know
 
            self.highest_connector_id = sync_header.highest_component_id;
 
            for peer in self.peers.iter() {
src/runtime2/native.rs
 
use std::collections::VecDeque;
 
use std::sync::{Arc, Mutex, Condvar};
 
use std::sync::atomic::Ordering;
 
use std::collections::HashMap;
 

	
 
use crate::protocol::ComponentCreationError;
 
use crate::protocol::eval::ValueGroup;
 
use crate::runtime2::branch::{FakeTree, QueueKind, SpeculativeState};
 
use crate::runtime2::consensus::{Consensus, Consistency};
 
use crate::runtime2::inbox::{DataContent, DataMessage, SyncMessage};
 

	
 
use super::{ConnectorKey, ConnectorId, RuntimeInner};
 
use super::branch::{BranchId, FakeTree, QueueKind, SpeculativeState};
 
use super::scheduler::{SchedulerCtx, ComponentCtx};
 
use super::port::{Port, PortIdLocal, Channel, PortKind};
 
use super::consensus::find_ports_in_value_group;
 
use super::consensus::{Consensus, Consistency, find_ports_in_value_group};
 
use super::connector::{ConnectorScheduling, ConnectorPDL};
 
use super::inbox::{Message, ControlContent, ControlMessage};
 
use super::inbox::{Message, DataContent, DataMessage, SyncMessage, ControlContent, ControlMessage};
 

	
 
/// Generic connector interface from the scheduler's point of view.
 
pub(crate) trait Connector {
 
    /// Should run the connector's behaviour up until the next blocking point.
 
    /// One should generally request and handle new messages from the component
 
    /// context. Then perform any logic the component has to do, and in the
 
    /// process perhaps queue up some state changes using the same context.
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling;
 
}
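A minimal sketch of what an implementation of this trait looks like; the component here is hypothetical and only drains its inbox through the context before asking not to be rescheduled:

    // Hypothetical no-op native component implementing Connector.
    struct IdleComponent;

    impl Connector for IdleComponent {
        fn run(&mut self, _sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
            // Drain pending messages so the scheduler stops waking us up.
            while let Some(_message) = comp_ctx.read_next_message() {}
            return ConnectorScheduling::NotNow;
        }
    }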
 

	
 
type SyncDone = Arc<(Mutex<bool>, Condvar)>;
 
pub(crate) struct FinishedSync {
 
    // In the order of the `get` calls
 
    inbox: Vec<ValueGroup>,
 
}
 

	
 
type SyncDone = Arc<(Mutex<Option<FinishedSync>>, Condvar)>;
 
type JobQueue = Arc<Mutex<VecDeque<ApplicationJob>>>;
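The new `SyncDone` shape (an `Option<FinishedSync>` instead of a bool) lets the application thread block until the round has produced its inbox. A sketch of the waiting side, assuming the usual `Mutex`/`Condvar` loop (the real `ApplicationInterface` is defined further down, outside this hunk):

    // Hypothetical wait on the application side: block until the component
    // publishes the FinishedSync result, then take ownership of it.
    fn wait_for_sync(sync_done: &SyncDone) -> FinishedSync {
        let (lock, condvar) = &**sync_done;
        let mut slot = lock.lock().unwrap();
        while slot.is_none() {
            slot = condvar.wait(slot).unwrap();
        }
        return slot.take().unwrap();
    }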
 

	
 
enum ApplicationJob {
 
    NewChannel((Port, Port)),
 
    NewConnector(ConnectorPDL, Vec<PortIdLocal>),
 
    SyncRound(Vec<ApplicationSyncAction>),
 
    Shutdown,
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// ConnectorApplication
 
// -----------------------------------------------------------------------------
 

	
 
/// The connector which an application can directly interface with. Once may set
 
/// up the next synchronous round, and retrieve the data afterwards.
 
// TODO: Strong candidate for logic reduction in handling put/get. A lot of code
 
//  is an approximate copy-pasta from the regular component logic.
 
//  is an approximate copy-pasta from the regular component logic. I'm going to
 
//  wait until I'm implementing more native components to see which logic is
 
//  truly common.
 
pub struct ConnectorApplication {
 
    // Communicating about new jobs and setting up sync rounds
 
    sync_done: SyncDone,
 
    job_queue: JobQueue,
 
    is_in_sync: bool,
 
    // Handling current sync round
 
    sync_desc: Vec<ApplicationSyncAction>,
 
    exec_tree: FakeTree,
 
    tree: FakeTree,
 
    consensus: Consensus,
 
    last_finished_handled: Option<BranchId>,
 
    branch_extra: Vec<usize>, // instruction counter per branch
 
}
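For context, a sketch of how a sync round reaches this component: the interface assembles a list of `ApplicationSyncAction`s and pushes it onto the shared job queue, which `run_in_deterministic_mode` below drains. The port IDs and the payload type of `Put` are assumptions inferred from the match arms below:

    // Hypothetical producer side of the job queue.
    fn queue_sync_round(job_queue: &JobQueue, put_port: PortIdLocal, get_port: PortIdLocal, payload: ValueGroup) {
        let actions = vec![
            ApplicationSyncAction::Put(put_port, payload), // send on one port ...
            ApplicationSyncAction::Get(get_port),          // ... then receive on another
        ];
        let mut queue = job_queue.lock().unwrap();
        queue.push_back(ApplicationJob::SyncRound(actions));
        // Waking the component up is ApplicationInterface's job, outside this hunk.
    }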
 

	
 
impl Connector for ConnectorApplication {
 
    fn run(&mut self, sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        if self.is_in_sync {
 
            return self.run_in_sync_mode(sched_ctx, comp_ctx);
 
            let scheduling = self.run_in_sync_mode(sched_ctx, comp_ctx);
 
            let mut iter_id = self.last_finished_handled.or(self.tree.get_queue_first(QueueKind::FinishedSync));
 
            while let Some(branch_id) = iter_id {
 
                iter_id = self.tree.get_queue_next(branch_id);
 
                self.last_finished_handled = Some(branch_id);
 

	
 
                if let Some(solution_branch) = self.consensus.handle_new_finished_sync_branch(branch_id, comp_ctx) {
 
                    // Can finish sync round immediately
 
                    self.collapse_sync_to_solution_branch(solution_branch, comp_ctx);
 
                    return ConnectorScheduling::Immediate;
 
                }
 
            }
 

	
 
            return scheduling;
 
        } else {
 
            return self.run_in_deterministic_mode(sched_ctx, comp_ctx);
 
        }
 
    }
 
}
 

	
 
impl ConnectorApplication {
 
    pub(crate) fn new(runtime: Arc<RuntimeInner>) -> (Self, ApplicationInterface) {
 
        let sync_done = Arc::new(( Mutex::new(false), Condvar::new() ));
 
        let sync_done = Arc::new(( Mutex::new(None), Condvar::new() ));
 
        let job_queue = Arc::new(Mutex::new(VecDeque::with_capacity(32)));
 

	
 
        let connector = ConnectorApplication {
 
            sync_done: sync_done.clone(),
 
            job_queue: job_queue.clone(),
 
            is_in_sync: false,
 
            sync_desc: Vec::new(),
 
            tree: FakeTree::new(),
 
            consensus: Consensus::new(),
 
            last_finished_handled: None,
 
            branch_extra: Vec::new(),
 
        };
 
        let interface = ApplicationInterface::new(sync_done, job_queue, runtime);
 

	
 
        return (connector, interface);
 
    }
 

	
 
    fn handle_new_messages(&mut self, comp_ctx: &mut ComponentCtx) {
 
        while let Some(message) = comp_ctx.read_next_message() {
 
            match message {
 
                Message::Data(_) => todo!("data message in API connector"),
 
                Message::Sync(_)  => todo!("sync message in API connector"),
 
                Message::Control(_) => todo!("impossible control message"),
 
            }
 
        }
 
    }
 

	
 
    pub fn handle_new_data_message(&mut self, message: DataMessage, ctx: &mut ComponentCtx) {
 
    pub(crate) fn handle_new_data_message(&mut self, message: DataMessage, ctx: &mut ComponentCtx) {
 
        // Go through all branches that are awaiting new messages and see if
 
        // there is one that can receive this message.
 
        debug_assert!(ctx.workspace_branches.is_empty());
 
        let mut branches = Vec::new(); // TODO: @Remove
 
        if !self.consensus.handle_new_data_message(&self.tree, &message, ctx, &mut branches) {
 
        if !self.consensus.handle_new_data_message(&message, ctx) {
 
            // Old message, so drop it
 
            return;
 
        }
 

	
 
        for branch_id in branches.drain(..) {
 
        let mut iter_id = self.tree.get_queue_first(QueueKind::AwaitingMessage);
 
        while let Some(branch_id) = iter_id {
 
            iter_id = self.tree.get_queue_next(branch_id);
 

	
 
            let branch = &self.tree[branch_id];
 
            if branch.awaiting_port != message.data_header.target_port { continue; }
 
            if !self.consensus.branch_can_receive(branch_id, &message) { continue; }
 

	
 
            // This branch can receive, so fork and give it the message
 
            let receiving_branch_id = self.tree.fork_branch(branch_id);
 
            self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
            let receiving_branch = &mut self.tree[receiving_branch_id];
 

	
 
            receiving_branch.insert_message(message.data_header.target_port, message.content.as_message().unwrap().clone());
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message.sync_header, &message.data_header, &message.content);
 
            self.consensus.notify_of_received_message(receiving_branch_id, &message);
 

	
 
            // And prepare the branch for running
 
            self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 
        }
 
    }
 

	
 
    pub fn handle_new_sync_message(&mut self, message: SyncMessage, ctx: &mut ComponentCtx) {
 
    pub(crate) fn handle_new_sync_message(&mut self, message: SyncMessage, ctx: &mut ComponentCtx) {
 
        if let Some(solution_branch_id) = self.consensus.handle_new_sync_message(message, ctx) {
 
            self.collapse_sync_to_solution_branch(solution_branch_id, ctx);
 
        }
 
    }
 

	
 
    fn run_in_sync_mode(&mut self, _sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(self.is_in_sync);
 

	
 
        self.handle_new_messages(comp_ctx);
 

	
 
        let branch_id = self.exec_tree.pop_from_queue(QueueKind::Runnable);
 
        let branch_id = self.tree.pop_from_queue(QueueKind::Runnable);
 
        if branch_id.is_none() {
 
            return ConnectorScheduling::NotNow;
 
        }
 

	
 
        let branch_id = branch_id.unwrap();
 
        let branch = &mut self.exec_tree[branch_id];
 
        let branch = &mut self.tree[branch_id];
 
        let mut instruction_idx = self.branch_extra[branch_id.index as usize];
 

	
 
        if instruction_idx >= self.sync_desc.len() {
 
            // Performed last instruction, so this branch is officially at the
 
            // end of the synchronous interaction.
 
            let consistency = self.consensus.notify_of_finished_branch(branch_id);
 
            if consistency == Consistency::Valid {
 
                branch.sync_state = SpeculativeState::ReachedSyncEnd;
 
                self.exec_tree.push_into_queue(QueueKind::FinishedSync, branch_id);
 
                self.tree.push_into_queue(QueueKind::FinishedSync, branch_id);
 
            } else {
 
                branch.sync_state = SpeculativeState::Inconsistent;
 
            }
 
        } else {
 
            // We still have instructions to perform
 
            let cur_instruction = &self.sync_desc[instruction_idx];
 
            self.branch_extra[branch_id.index as usize] += 1;
 

	
 
            match &cur_instruction {
 
                ApplicationSyncAction::Put(port_id, content) => {
 
                    let port_id = *port_id;
 
                    let consistency = self.consensus.notify_of_speculative_mapping(branch_id, port_id, true);
 
                    if consistency == Consistency::Valid {
 
                        let (sync_header, data_header) = self.consensus.handle_message_to_send(branch_id, port_id, &content, ctx);
 
                        let (sync_header, data_header) = self.consensus.handle_message_to_send(branch_id, port_id, &content, comp_ctx);
 
                        let message = Message::Data(DataMessage {
 
                            sync_header,
 
                            data_header,
 
                            content: DataContent::Message(content.clone()),
 
                        });
 
                        comp_ctx.submit_message(message);
 
                        self.exec_tree.push_into_queue(QueueKind::Runnable, branch_id);
 
                        self.tree.push_into_queue(QueueKind::Runnable, branch_id);
 
                        return ConnectorScheduling::Immediate;
 
                    } else {
 
                        branch.sync_state = SpeculativeState::Inconsistent;
 
                    }
 
                },
 
                ApplicationSyncAction::Get(port_id) => {
 
                    let port_id = *port_id;
 
                    let consistency = self.consensus.notify_of_speculative_mapping(branch_id, port_id, true);
 
                    if consistency == Consistency::Valid {
 
                        branch.sync_state = SpeculativeState::HaltedAtBranchPoint;
 
                        branch.awaiting_port = port_id;
 
                        self.exec_tree.push_into_queue(QueueKind::AwaitingMessage, branch_id);
 
                        self.tree.push_into_queue(QueueKind::AwaitingMessage, branch_id);
 

	
 
                        let mut any_message_received = false;
 
                        for message in comp_ctx.get_read_data_messages(port_id) {
 
                            if self.consensus.branch_can_receive(branch_id, &message.sync_header, &message.data_header, &message.content) {
 
                            if self.consensus.branch_can_receive(branch_id, &message) {
 
                                // This branch can receive the message, so we do the
 
                                // fork-and-receive dance
 
                                let receiving_branch_id = self.exec_tree.fork_branch(branch_id);
 
                                let branch = &mut self.exec_tree[receiving_branch_id];
 
                                let receiving_branch_id = self.tree.fork_branch(branch_id);
 
                                let branch = &mut self.tree[receiving_branch_id];
 
                                debug_assert!(receiving_branch_id.index as usize == self.branch_extra.len());
 
                                self.branch_extra.push(instruction_idx + 1);
 

	
 
                                branch.insert_message(port_id, message.content.as_message().unwrap().clone());
 

	
 
                                self.consensus.notify_of_new_branch(branch_id, receiving_branch_id);
 
                                self.consensus.notify_of_received_message(receiving_branch_id, &message.sync_header, &message.data_header, &message.content);
 
                                self.exec_tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 
                                self.consensus.notify_of_received_message(receiving_branch_id, &message);
 
                                self.tree.push_into_queue(QueueKind::Runnable, receiving_branch_id);
 

	
 
                                any_message_received = true;
 
                            }
 
                        }
 

	
 
                        if any_message_received {
 
                            return ConnectorScheduling::Immediate;
 
                        }
 
                    } else {
 
                        branch.sync_state = SpeculativeState::Inconsistent;
 
                    }
 
                }
 
            }
 
        }
 

	
 
        if self.exec_tree.queue_is_empty(QueueKind::Runnable) {
 
        if self.tree.queue_is_empty(QueueKind::Runnable) {
 
            return ConnectorScheduling::NotNow;
 
        } else {
 
            return ConnectorScheduling::Later;
 
        }
 
    }
 

	
 
    fn run_in_deterministic_mode(&mut self, _sched_ctx: SchedulerCtx, comp_ctx: &mut ComponentCtx) -> ConnectorScheduling {
 
        debug_assert!(!self.is_in_sync);
 

	
 
        // In non-sync mode the application component does nothing except
 
        // perform jobs submitted through the API. This is the only case in
 
        // which we expect to be woken up.
 
        let mut queue = self.job_queue.lock().unwrap();
 
        while let Some(job) = queue.pop_front() {
 
            match job {
 
                ApplicationJob::NewChannel((endpoint_a, endpoint_b)) => {
 
                    comp_ctx.push_port(endpoint_a);
 
                    comp_ctx.push_port(endpoint_b);
 
                }
 
                ApplicationJob::NewConnector(connector, initial_ports) => {
 
                    comp_ctx.push_component(connector, initial_ports);
 
                },
 
                ApplicationJob::SyncRound(mut description) => {
 
                    // Entering sync mode
 
                    if description.is_empty() {
 
                        // To simplify the logic we always have at least one instruction
 
                        description.push(ApplicationSyncAction::Noop);
 
                    }
 

	
 
                    self.sync_desc = description;
 
                    self.is_in_sync = true;
 
                    debug_assert!(self.last_finished_handled.is_none());
 
                    debug_assert!(self.branch_extra.is_empty());
 

	
 
                    let first_branch_id = self.exec_tree.start_sync();
 
                    self.exec_tree.push_into_queue(QueueKind::Runnable, first_branch_id);
 
                    self.consensus.start_sync(ctx);
 
                    let first_branch_id = self.tree.start_sync();
 
                    self.tree.push_into_queue(QueueKind::Runnable, first_branch_id);
 
                    self.consensus.start_sync(comp_ctx);
 
                    self.branch_extra.push(0); // set first branch to first instruction
 

	
 
                    return ConnectorScheduling::Immediate;
 
                },
 
                ApplicationJob::Shutdown => {
 
                    debug_assert!(queue.is_empty());
 
                    return ConnectorScheduling::Exit;
 
                }
 
            }
 
        }
 

	
 
        return ConnectorScheduling::NotNow;
 
    }
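    // Editor's sketch (not part of this changeset): the job-queue handoff used
    // above in miniature. The API side pushes jobs into a shared, mutex-guarded
    // queue (and, in the real code, follows up with wake_up_connector_with_ping);
    // the component drains the queue once it gets scheduled. The `Job` type and
    // names below are illustrative only.
    fn _job_queue_sketch() {
        use std::collections::VecDeque;
        use std::sync::{Arc, Mutex};

        enum Job { Ping, Shutdown }

        let queue: Arc<Mutex<VecDeque<Job>>> = Arc::new(Mutex::new(VecDeque::new()));

        // Producer side (the application interface): enqueue under the lock.
        queue.lock().unwrap().push_back(Job::Ping);
        queue.lock().unwrap().push_back(Job::Shutdown);

        // Consumer side (the component, once scheduled): drain until empty.
        let mut guard = queue.lock().unwrap();
        while let Some(job) = guard.pop_front() {
            match job {
                Job::Ping => {}, // only woken up, nothing to do
                Job::Shutdown => break,
            }
        }
    }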
 

	
 
    fn collapse_sync_to_solution_branch(&mut self, branch_id: BranchId, comp_ctx: &mut ComponentCtx) {
 
        debug_assert!(self.branch_extra[branch_id.index as usize] >= self.sync_desc.len()); // finished program
 
        // Notifying tree, consensus algorithm and context of ending sync
 
        let mut fake_vec = Vec::new();
 
        let mut solution_branch = self.tree.end_sync(branch_id);
 
        self.consensus.end_sync(branch_id, &mut fake_vec);
 

	
 
        for port in fake_vec {
 
            debug_assert!(comp_ctx.get_port_by_id(port).is_some());
 
        }
 

	
 
        comp_ctx.notify_sync_end(&[]);
 

	
 
        // Turn the per-port inbox map into a vector of values, ordered by the Get actions
 
        let mut inbox = Vec::with_capacity(solution_branch.inbox.len());
 
        for action in &self.sync_desc {
 
            match action {
 
                ApplicationSyncAction::Put(_, _) => {},
 
                ApplicationSyncAction::Get(port_id) => {
 
                    debug_assert!(solution_branch.inbox.contains_key(port_id));
 
                    inbox.push(solution_branch.inbox.remove(port_id).unwrap());
 
                },
 
            }
 
        }
 

	
 
        // Notifying interface of ending sync
 
        self.is_in_sync = false;
 
        self.sync_desc.clear();
 
        self.branch_extra.clear();
 
        self.last_finished_handled = None;
 

	
 
        let (results, notification) = &*self.sync_done;
 
        let mut results = results.lock().unwrap();
 
        *results = Some(FinishedSync{ inbox });
 
        notification.notify_one();
 
    }
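    // Editor's sketch (not part of this changeset): the ordering rule applied
    // above, in isolation. Only Get actions contribute to the returned values,
    // and they do so in the order in which they appear in the sync description;
    // Put actions contribute nothing. Types below are stand-ins, not the
    // runtime's own.
    fn _inbox_ordering_sketch() {
        use std::collections::HashMap;

        enum Action { Put(u32), Get(u32) }

        let description = vec![Action::Put(0), Action::Get(1), Action::Get(2)];
        let mut inbox = HashMap::new();
        inbox.insert(1u32, "first message");
        inbox.insert(2u32, "second message");

        let mut result = Vec::new();
        for action in &description {
            if let Action::Get(port) = action {
                result.push(inbox.remove(port).unwrap()); // one entry per Get, in order
            }
        }
        assert_eq!(result, vec!["first message", "second message"]);
    }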
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// ApplicationInterface
 
// -----------------------------------------------------------------------------
 

	
 
#[derive(Debug)]
 
pub enum ChannelCreationError {
 
    InSync,
 
}
 

	
 
#[derive(Debug)]
 
@@ -336,32 +398,32 @@ impl ApplicationInterface {
 
    // TODO: Yank out scheduler logic for common use.
 
    pub fn create_connector(&mut self, module: &str, routine: &str, arguments: ValueGroup) -> Result<(), ComponentCreationError> {
 
        if self.is_in_sync {
 
            return Err(ComponentCreationError::InSync);
 
        }
 

	
 
        // Retrieve the ports in the arguments and make sure that we currently
 
        // own all of them. The scheduler checks this as well, but it does so
 
        // asynchronously.
 
        let mut initial_ports = Vec::new();
 
        find_ports_in_value_group(&arguments, &mut initial_ports);
 
        for initial_port in &initial_ports {
 
            if !self.owned_ports.iter().any(|v| v == initial_port) {
 
            if !self.owned_ports.iter().any(|(_, v)| v == initial_port) {
 
                return Err(ComponentCreationError::UnownedPort);
 
            }
 
        }
 

	
 
        // We own all ports, so remove them on this side
 
        for initial_port in &initial_ports {
 
            let position = self.owned_ports.iter().position(|v| v == initial_port).unwrap();
 
            let position = self.owned_ports.iter().position(|(_, v)| v == initial_port).unwrap();
 
            self.owned_ports.remove(position);
 
        }
 

	
 
        let state = self.runtime.protocol_description.new_component_v2(module.as_bytes(), routine.as_bytes(), arguments)?;
 
        let connector = ConnectorPDL::new(state);
 

	
 
        // Put on job queue
 
        {
 
            let mut queue = self.job_queue.lock().unwrap();
 
            queue.push_back(ApplicationJob::NewConnector(connector, initial_ports));
 
        }
 

	
 
@@ -395,45 +457,47 @@ impl ApplicationInterface {
 
                None => {
 
                    return Err(ApplicationStartSyncError::UnownedPort);
 
                }
 
            }
 
        }
 

	
 
        // Everything is consistent; enter sync mode and send the actions off
 
        // to the component that will actually perform the sync round
 
        self.is_in_sync = true;
 
        {
 
            let (is_done, _) = &*self.sync_done;
 
            let mut lock = is_done.lock().unwrap();
 
            *lock = false;
 
            *lock = None;
 
        }
 

	
 
        {
 
            let mut lock = self.job_queue.lock().unwrap();
 
            lock.push_back(ApplicationJob::SyncRound(actions));
 
        }
 

	
 
        self.wake_up_connector_with_ping();
 
        return Ok(())
 
    }
 

	
 
    /// Wait until the sync round currently in progress has finished
 
    pub fn wait(&self) -> Result<Vec<ValueGroup>, ApplicationEndSyncError> {
 
        if !self.is_in_sync {
 
            return Err(ApplicationEndSyncError::NotInSync);
 
        }
 

	
 
        let (is_done, condition) = &*self.sync_done;
 
        let lock = is_done.lock().unwrap();
 
        condition.wait_while(lock, |v| !*v).unwrap(); // wait while not done
 
        let mut lock = is_done.lock().unwrap();
 
        lock = condition.wait_while(lock, |v| v.is_none()).unwrap(); // wait while not done
 

	
 
        return Ok(lock.take().unwrap().inbox);
 
    }
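    // Editor's sketch (not part of this changeset): the result handoff behind
    // `wait`, reduced to its core. The finishing side stores `Some(result)` in
    // a mutex-guarded `Option` and notifies the condition variable; the waiting
    // side blocks while the slot is `None` and then takes the value out. Names
    // and the result type below are illustrative only.
    fn _sync_done_sketch() {
        use std::sync::{Arc, Condvar, Mutex};
        use std::thread;

        let done: Arc<(Mutex<Option<Vec<u32>>>, Condvar)> =
            Arc::new((Mutex::new(None), Condvar::new()));

        // Producer (the component collapsing its sync round to a solution).
        let producer = Arc::clone(&done);
        let handle = thread::spawn(move || {
            let (slot, notify) = &*producer;
            *slot.lock().unwrap() = Some(vec![1, 2, 3]);
            notify.notify_one();
        });

        // Consumer (the `wait` call): block while the slot is still empty.
        let (slot, notify) = &*done;
        let mut guard = notify.wait_while(slot.lock().unwrap(), |v| v.is_none()).unwrap();
        assert_eq!(guard.take().unwrap(), vec![1, 2, 3]);
        handle.join().unwrap();
    }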
 

	
 
    /// Called by runtime to set associated connector's ID.
 
    pub(crate) fn set_connector_id(&mut self, id: ConnectorId) {
 
        self.connector_id = id;
 
    }
 

	
 
    fn wake_up_connector_with_ping(&self) {
 
        let connector = self.runtime.get_component_public(self.connector_id);
 
        connector.inbox.insert_message(Message::Control(ControlMessage {
 
            id: 0,
 
            sending_component_id: self.connector_id,
src/runtime2/tests/mod.rs
Show inline comments
 
mod network_shapes;
 

	
 
use super::*;
 
use crate::{PortId, ProtocolDescription};
 
use crate::common::Id;
 
use crate::protocol::eval::*;
 

	
 
const NUM_THREADS: u32 = 3;     // number of threads in runtime
 
const NUM_INSTANCES: u32 = 5;   // number of test instances constructed
 
const NUM_LOOPS: u32 = 5;       // number of loops within a single test (not used by all tests)
 

	
 
fn create_runtime(pdl: &str) -> Runtime {
 
    let protocol = ProtocolDescription::parse(pdl.as_bytes()).expect("parse pdl");
 
    let runtime = Runtime::new(NUM_THREADS, protocol);
 
@@ -66,25 +68,25 @@ fn test_put_and_get() {
 
        while (index < loops) {
 
            synchronous {
 
                auto result = get(receiver);
 
                assert(result);
 
            }
 
            index += 1;
 
        }
 
    }
 
    ";
 

	
 
    let thing = TestTimer::new("put_and_get");
 
    run_test_in_runtime(CODE, |api| {
 
        let channel = api.create_channel();
 
        let channel = api.create_channel().unwrap();
 

	
 
        api.create_connector("", "putter", ValueGroup::new_stack(vec![
 
            Value::Output(PortId(Id{ connector_id: 0, u32_suffix: channel.putter_id.index })),
 
            Value::UInt32(NUM_LOOPS)
 
        ])).expect("create putter");
 

	
 
        api.create_connector("", "getter", ValueGroup::new_stack(vec![
 
            Value::Input(PortId(Id{ connector_id: 0, u32_suffix: channel.getter_id.index })),
 
            Value::UInt32(NUM_LOOPS)
 
        ])).expect("create getter");
 
    });
 
}
 
@@ -141,25 +143,25 @@ fn test_star_shaped_request() {
 
            edge_index += 1;
 
        }
 

	
 
        new center(requests, responses, num_loops);
 
    }
 
    ";
 

	
 
    let thing = TestTimer::new("star_shaped_request");
 
    run_test_in_runtime(CODE, |api| {
 
        api.create_connector("", "constructor", ValueGroup::new_stack(vec![
 
            Value::UInt32(5),
 
            Value::UInt32(NUM_LOOPS),
 
        ]));
 
        ])).expect("create connector");
 
    });
 
}
 

	
 
#[test]
 
fn test_conga_line_request() {
 
    const CODE: &'static str = "
 
    primitive start(out<u32> req, in<u32> resp, u32 num_nodes, u32 num_loops) {
 
        u32 loop_index = 0;
 
        u32 initial_value = 1337;
 
        while (loop_index < num_loops) {
 
            synchronous {
 
                put(req, initial_value);
 
@@ -218,15 +220,15 @@ fn test_conga_line_request() {
 
            node += 1;
 
        }
 

	
 
        new end(last_req_in, last_resp_out, num_loops);
 
    }
 
    ";
 

	
 
    let thing = TestTimer::new("conga_line_request");
 
    run_test_in_runtime(CODE, |api| {
 
        api.create_connector("", "constructor", ValueGroup::new_stack(vec![
 
            Value::UInt32(5),
 
            Value::UInt32(NUM_LOOPS)
 
        ]));
 
        ])).expect("create connector");
 
    });
 
}
 
\ No newline at end of file
src/runtime2/tests/network_shapes.rs
Show inline comments
 
new file 100644
 
// Testing particular graph shapes
 
\ No newline at end of file