// Location: CSY/reowolf/src/runtime2/inbox.rs

use std::sync::Mutex;
use std::collections::VecDeque;
use crate::protocol::eval::ValueGroup;
use crate::runtime2::consensus::{ComponentPresence, SolutionCombiner};
use crate::runtime2::port::ChannelId;
use super::ConnectorId;
use super::branch::BranchId;
use super::consensus::{GlobalSolution, LocalSolution};
use super::port::PortIdLocal;

// TODO: Remove Debug derive from all types
#[derive(Debug, Copy, Clone)]
pub(crate) struct ChannelAnnotation {
    pub channel_id: ChannelId,
    pub registered_id: Option<BranchMarker>,
    pub expected_firing: Option<bool>,
}

/// Marker for a branch in a port mapping. Like a branch ID, a marker uniquely
/// identifies a branch, but a branch has exactly one branch ID while it may
/// have multiple associated markers (e.g. one branch performing a `put` three
/// times will generate three markers).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct BranchMarker {
    marker: u32,
}

impl BranchMarker {
    #[inline]
    pub(crate) fn new(marker: u32) -> Self {
        debug_assert!(marker != 0);
        return Self{ marker };
    }

    #[inline]
    pub(crate) fn new_invalid() -> Self {
        return Self{ marker: 0 };
    }
}
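
// Illustrative sketch (not part of the original file): a branch keeps a single
// branch ID, but each `put` it performs is registered under a fresh marker.
// The counter values below are arbitrary; the real runtime hands them out.
#[cfg(test)]
mod branch_marker_example {
    use super::BranchMarker;

    #[test]
    fn one_branch_may_have_many_markers() {
        // Three successive `put`s on the same branch yield three distinct markers.
        let first = BranchMarker::new(1);
        let second = BranchMarker::new(2);
        let third = BranchMarker::new(3);
        assert_ne!(first, second);
        assert_ne!(second, third);
        // Marker 0 is reserved as the "invalid" marker and never matches a real one.
        assert_ne!(BranchMarker::new_invalid(), first);
    }
}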

/// The header added by the synchronization algorithm to all messages handled
/// by the consensus algorithm.
#[derive(Debug, Clone)]
pub(crate) struct SyncHeader {
    pub sending_component_id: ConnectorId,
    pub highest_component_id: ConnectorId,
    pub sync_round: u32,
}

/// The header added to data messages.
#[derive(Debug, Clone)]
pub(crate) struct DataHeader {
    pub expected_mapping: Vec<ChannelAnnotation>,
    pub sending_port: PortIdLocal,
    pub target_port: PortIdLocal,
    pub new_mapping: BranchMarker,
}

/// A data message is a message that is intended for the receiver's PDL code,
/// but will also be handled by the consensus algorithm.
#[derive(Debug, Clone)]
pub(crate) struct DataMessage {
    pub sync_header: SyncHeader,
    pub data_header: DataHeader,
    pub content: ValueGroup,
}

#[derive(Debug)]
pub(crate) enum SyncCompContent {
    LocalFailure, // notifying leader that component has failed (e.g. timeout, whatever)
    LocalSolution(LocalSolution), // sending a local solution to the leader
    PartialSolution(SolutionCombiner), // when new leader is detected, forward all local results
    GlobalSolution(GlobalSolution), // broadcasting to everyone
    GlobalFailure, // broadcasting to everyone
    AckFailure, // acknowledgement of failure to leader
    Notification, // just a notification (so purpose of message is to send the SyncHeader)
    Presence(ComponentPresence), // notifying leader of component presence (needed to ensure failing a round involves all components in a sync round)
}

/// A sync message is a message that is intended only for the consensus
/// algorithm. The message goes directly to a component.
#[derive(Debug)]
pub(crate) struct SyncCompMessage {
    pub sync_header: SyncHeader,
    pub target_component_id: ConnectorId,
    pub content: SyncCompContent,
}

#[derive(Debug)]
pub(crate) enum SyncPortContent {
    SilentPortNotification,
    NotificationWave,
}

/// A sync message intended for the consensus algorithm. This message does not
/// go to a component, but through a channel (and results in potential
/// rerouting) because we're not sure about the ID of the component that holds
/// the other end of the channel.
#[derive(Debug)]
pub(crate) struct SyncPortMessage {
    pub sync_header: SyncHeader,
    pub source_port: PortIdLocal,
    pub target_port: PortIdLocal,
    pub content: SyncPortContent,
}

#[derive(Debug)]
pub(crate) enum SyncControlContent {
    ChannelIsClosed(PortIdLocal), // contains the port that is owned by the recipient of the message
}

/// A sync control message: it originates from the scheduler, but is intended
/// for the recipient's current sync round. Every kind of consensus algorithm
/// must be able to handle such a message.
#[derive(Debug)]
pub(crate) struct SyncControlMessage {
    // For now these control messages are only aimed at components. This might
    // change in the future. Currently we only respond to messages from
    // components that have, by sending that message, published their ID.
    pub in_response_to_sync_round: u32,
    pub target_component_id: ConnectorId,
    pub content: SyncControlContent,
}

/// A control message is a message intended for the scheduler that is executing
/// a component.
#[derive(Debug)]
pub(crate) struct ControlMessage {
    pub id: u32, // generic identifier, used to match a request to its response
    pub sending_component_id: ConnectorId,
    pub content: ControlContent,
}

#[derive(Debug)]
pub(crate) enum ControlContent {
    PortPeerChanged(PortIdLocal, ConnectorId),
    CloseChannel(PortIdLocal),
    Ack,
    Ping,
}
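
// Illustrative sketch (not part of the original file): the generic `id` field
// is what lets a scheduler match an incoming `Ack` to a control message it sent
// earlier. How and where pending requests are stored is assumed here.
#[allow(dead_code)]
fn acknowledges(pending: &ControlMessage, response: &ControlMessage) -> bool {
    // A response closes the pending request whose identifier it echoes.
    response.id == pending.id && matches!(response.content, ControlContent::Ack)
}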

/// Combination of all message kinds: data messages, synchronization messages
/// and control messages.
#[derive(Debug)]
pub(crate) enum Message {
    Data(DataMessage),
    SyncComp(SyncCompMessage),
    SyncPort(SyncPortMessage),
    SyncControl(SyncControlMessage),
    Control(ControlMessage),
}

impl Message {
    /// If the message was sent through a particular channel, then this function
    /// returns the port through which the message was sent.
    pub(crate) fn source_port(&self) -> Option<PortIdLocal> {
        // Currently only data messages and sync port messages have a source port
        match self {
            Message::Data(message) => return Some(message.data_header.sending_port),
            Message::SyncPort(message) => return Some(message.source_port),
            Message::SyncComp(_) => return None,
            Message::SyncControl(_) => return None,
            Message::Control(_) => return None,
        }
    }
}
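
// Illustrative sketch (not part of the original file): a receiver could use
// `source_port` to check whether a queued message travelled through a specific
// channel endpoint, e.g. while rerouting after a peer change. This assumes
// `PortIdLocal` implements `PartialEq`, which is not shown in this file.
#[allow(dead_code)]
fn was_sent_through(message: &Message, port: PortIdLocal) -> bool {
    // Only data and sync-port messages carry a source port; all other messages
    // are addressed to a component directly and never match.
    message.source_port() == Some(port)
}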

/// The public inbox of a connector. The thread running the connector that owns
/// this inbox may retrieve messages from it. Non-owning threads may only put
/// new messages inside of it.
// TODO: @Optimize, lazy concurrency. Probably ringbuffer with read/write heads.
//  Should behave as an MPSC queue.
pub struct PublicInbox {
    messages: Mutex<VecDeque<Message>>,
}

impl PublicInbox {
    pub fn new() -> Self {
        Self{
            messages: Mutex::new(VecDeque::new()),
        }
    }

    pub(crate) fn insert_message(&self, message: Message) {
        let mut lock = self.messages.lock().unwrap();
        lock.push_back(message);
    }

    pub(crate) fn take_message(&self) -> Option<Message> {
        let mut lock = self.messages.lock().unwrap();
        return lock.pop_front();
    }

    pub fn is_empty(&self) -> bool {
        let lock = self.messages.lock().unwrap();
        return lock.is_empty();
    }
}
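
// Illustrative sketch (not part of the original file): the inbox is meant to be
// used as an MPSC queue, where any thread may push but only the owning scheduler
// thread drains it. A drain loop on the owning side could look like this; the
// message handler is assumed to be supplied by the caller.
#[allow(dead_code)]
fn drain_inbox(inbox: &PublicInbox, mut handle: impl FnMut(Message)) {
    // Keep popping until the queue is observed empty. Producers may push
    // concurrently, so "empty" is only a snapshot; a scheduler that is woken
    // up again whenever a new message arrives does not mind missing it here.
    while let Some(message) = inbox.take_message() {
        handle(message);
    }
}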