Files @ 1677e0c9568d
Branch filter:

Location: CSY/reowolf/src/runtime2/inbox.rs - annotation

1677e0c9568d 6.0 KiB application/rls-services+xml Show Source Show as Raw Download as Raw
MH
Halfway implementing failure, fixing bug involving wrong mapping
a43d61913724
68411f4b8014
cf26538b25dc
8c5d438b0fa3
1677e0c9568d
1677e0c9568d
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
a43d61913724
8c5d438b0fa3
68411f4b8014
cf26538b25dc
68411f4b8014
1677e0c9568d
1677e0c9568d
088be7630245
68411f4b8014
58dfabd1be9f
58dfabd1be9f
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
088be7630245
68411f4b8014
1755ca411ca7
68411f4b8014
68411f4b8014
68411f4b8014
7662b8fb871d
58dfabd1be9f
58dfabd1be9f
68411f4b8014
1755ca411ca7
68411f4b8014
1677e0c9568d
68411f4b8014
68411f4b8014
088be7630245
58dfabd1be9f
58dfabd1be9f
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
1755ca411ca7
68411f4b8014
68411f4b8014
68411f4b8014
c97c5d60bc61
c97c5d60bc61
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
c97c5d60bc61
c97c5d60bc61
68411f4b8014
c97c5d60bc61
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
c97c5d60bc61
68411f4b8014
1677e0c9568d
1677e0c9568d
68411f4b8014
1677e0c9568d
68411f4b8014
1677e0c9568d
1677e0c9568d
68411f4b8014
1677e0c9568d
58dfabd1be9f
58dfabd1be9f
68411f4b8014
1677e0c9568d
68411f4b8014
1677e0c9568d
68411f4b8014
68411f4b8014
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
1677e0c9568d
8a530d2dc72f
8a530d2dc72f
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
daf15df0f8ca
68411f4b8014
68411f4b8014
daf15df0f8ca
daf15df0f8ca
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
daf15df0f8ca
daf15df0f8ca
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
1677e0c9568d
1677e0c9568d
68411f4b8014
b4ac681e0e7f
b4ac681e0e7f
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
a99ae23c30ec
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
68411f4b8014
cf26538b25dc
use std::sync::Mutex;
use std::collections::VecDeque;

use crate::protocol::eval::ValueGroup;
use crate::runtime2::consensus::SolutionCombiner;
use crate::runtime2::port::ChannelId;

use super::ConnectorId;
use super::branch::BranchId;
use super::consensus::{GlobalSolution, LocalSolution};
use super::port::PortIdLocal;

// TODO: Remove Debug derive from all types

#[derive(Debug, Copy, Clone)]
pub(crate) struct ChannelAnnotation {
    /// The channel this annotation describes.
    pub channel_id: ChannelId,
    /// Marker of the branch registered on this channel, if any.
    pub registered_id: Option<BranchMarker>,
    /// Whether the channel is expected to fire in this round; `None`
    /// presumably means "no expectation yet" — TODO confirm against the
    /// consensus algorithm.
    pub expected_firing: Option<bool>,
}

/// Marker for a branch in a port mapping. A marker is, like a branch ID, a
/// unique identifier for a branch, but differs in that a branch only has one
/// branch ID, but might have multiple associated markers (i.e. one branch
/// performing a `put` three times will generate three markers).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct BranchMarker{
    // 0 is reserved as the "invalid" marker (see `new`/`new_invalid`).
    marker: u32,
}

impl BranchMarker {
    /// Constructs a marker from a non-zero value. The value 0 is reserved
    /// for the invalid marker produced by `new_invalid`.
    #[inline]
    pub(crate) fn new(marker: u32) -> Self {
        debug_assert!(marker != 0);
        Self{ marker }
    }

    /// Constructs the sentinel "invalid" marker (internal value 0).
    #[inline]
    pub(crate) fn new_invalid() -> Self {
        Self{ marker: 0 }
    }
}

/// The header added by the synchronization algorithm to all messages.
#[derive(Debug, Clone)]
pub(crate) struct SyncHeader {
    /// Component that sent this message.
    pub sending_component_id: ConnectorId,
    /// Highest component ID the sender knows of; presumably used by the
    /// leader election for the sync leader — TODO confirm.
    pub highest_component_id: ConnectorId,
    /// Identifier of the synchronous round this message belongs to.
    pub sync_round: u32,
}

/// The header added to data messages
#[derive(Debug, Clone)]
pub(crate) struct DataHeader {
    /// Per-channel port mapping the sender expects the receiver to agree
    /// with.
    pub expected_mapping: Vec<ChannelAnnotation>,
    /// Port through which the message was sent.
    pub sending_port: PortIdLocal,
    /// Port at which the message should arrive.
    pub target_port: PortIdLocal,
    /// Fresh marker generated for the sending branch by this `put`.
    pub new_mapping: BranchMarker,
}

// TODO: Very much on the fence about this. On one hand I thought making it a
//  data message was neat because "silent port notification" should be rerouted
//  like any other data message to determine the component ID of the receiver
//  and to make it part of the leader election algorithm for the sync leader.
//  However: it complicates logic quite a bit. Really it might be easier to
//  create `Message::SyncAtComponent` and `Message::SyncAtPort` messages...
#[derive(Debug, Clone)]
pub(crate) enum DataContent {
    /// Notification that a port stays silent; carries no user-visible
    /// payload (see the TODO above for why this is a data message).
    SilentPortNotification,
    /// An actual message payload destined for the receiver's PDL code.
    Message(ValueGroup),
}

impl DataContent {
    /// Returns a reference to the contained payload, or `None` if this is a
    /// silent-port notification.
    pub(crate) fn as_message(&self) -> Option<&ValueGroup> {
        if let DataContent::Message(message) = self {
            Some(message)
        } else {
            None
        }
    }
}

/// A data message is a message that is intended for the receiver's PDL code,
/// but will also be handled by the consensus algorithm
#[derive(Debug, Clone)]
pub(crate) struct DataMessage {
    /// Header for the synchronization layer.
    pub sync_header: SyncHeader,
    /// Header describing routing and the sender's expected port mapping.
    pub data_header: DataHeader,
    /// Payload, or a silent-port notification.
    pub content: DataContent,
}

/// Payload variants for component-targeted synchronization messages.
#[derive(Debug)]
pub(crate) enum SyncCompContent {
    LocalFailure, // notifying leader that component has failed (e.g. timeout, whatever)
    LocalSolution(LocalSolution), // sending a local solution to the leader
    PartialSolution(SolutionCombiner), // when new leader is detected, forward all local results
    GlobalSolution(GlobalSolution), // broadcasting to everyone
    GlobalFailure, // broadcasting to everyone
    AckFailure, // acknowledgement of failure to leader
    Notification, // just a notification (so purpose of message is to send the SyncHeader)
    Presence(ConnectorId, Vec<ChannelId>), // notifying leader of component presence (needed to ensure failing a round involves all components in a sync round)
}

/// A sync message is a message that is intended only for the consensus
/// algorithm. The message goes directly to a component.
#[derive(Debug)]
pub(crate) struct SyncCompMessage {
    /// Header for the synchronization layer.
    pub sync_header: SyncHeader,
    /// Component the message is addressed to.
    pub target_component_id: ConnectorId,
    /// The actual consensus payload.
    pub content: SyncCompContent,
}

/// Payload variants for port-targeted synchronization messages.
#[derive(Debug)]
pub(crate) enum SyncPortContent {
    // NOTE(review): presumably a wave propagated through channels to notify
    // peers during a sync round — TODO confirm against the consensus code.
    NotificationWave,
}

/// A sync message addressed port-to-port (through a channel) rather than
/// directly to a component.
#[derive(Debug)]
pub(crate) struct SyncPortMessage {
    /// Header for the synchronization layer.
    pub sync_header: SyncHeader,
    /// Port through which the message was sent.
    pub source_port: PortIdLocal,
    /// Port at which the message should arrive.
    pub target_port: PortIdLocal,
    /// The actual consensus payload.
    pub content: SyncPortContent,
}

/// A control message is a message intended for the scheduler that is executing
/// a component.
#[derive(Debug)]
pub(crate) struct ControlMessage {
    pub id: u32, // generic identifier, used to match request to response
    /// Component that sent this control message.
    pub sending_component_id: ConnectorId,
    /// The actual control payload.
    pub content: ControlContent,
}

/// Payload variants for control messages.
#[derive(Debug)]
pub(crate) enum ControlContent {
    // NOTE(review): variant semantics below inferred from names — confirm
    // against the scheduler's handling code.
    PortPeerChanged(PortIdLocal, ConnectorId), // peer of this port now lives at this component
    CloseChannel(PortIdLocal), // request to close the channel owning this port
    Ack, // acknowledgement of a previous control message (matched via `id`)
    Ping,
}

/// Combination of data message and control messages.
#[derive(Debug)]
pub(crate) enum Message {
    /// Message for the receiver's PDL code (also seen by consensus).
    Data(DataMessage),
    /// Consensus message addressed directly to a component.
    SyncComp(SyncCompMessage),
    /// Consensus message addressed port-to-port.
    SyncPort(SyncPortMessage),
    /// Message for the scheduler executing a component.
    Control(ControlMessage),
}

impl Message {
    /// If the message is sent through a particular channel, then this function
    /// returns the port through which the message was sent.
    pub(crate) fn source_port(&self) -> Option<PortIdLocal> {
        // Only data messages carry a source port at the moment.
        match self {
            Message::Data(message) => Some(message.data_header.sending_port),
            Message::SyncComp(_) |
            Message::SyncPort(_) |
            Message::Control(_) => None,
        }
    }
}

/// The public inbox of a connector. The thread running the connector that owns
/// this inbox may retrieve messages from it. Non-owning threads may only put
/// new messages inside of it.
// TODO: @Optimize, lazy concurrency. Probably ringbuffer with read/write heads.
//  Should behave as a MPSC queue.
pub struct PublicInbox {
    // FIFO queue guarded by a mutex; locked briefly on every insert/take.
    messages: Mutex<VecDeque<Message>>,
}

impl PublicInbox {
    /// Creates a new, empty inbox.
    pub fn new() -> Self {
        Self{
            messages: Mutex::new(VecDeque::new()),
        }
    }

    /// Appends a message to the back of the queue. Safe to call from any
    /// thread.
    pub(crate) fn insert_message(&self, message: Message) {
        self.messages.lock().unwrap().push_back(message);
    }

    /// Removes and returns the oldest queued message, if any.
    pub(crate) fn take_message(&self) -> Option<Message> {
        self.messages.lock().unwrap().pop_front()
    }

    /// Returns `true` if no messages are currently queued.
    pub fn is_empty(&self) -> bool {
        self.messages.lock().unwrap().is_empty()
    }
}