diff --git a/src/runtime2/component/component_pdl.rs b/src/runtime2/component/component_pdl.rs
new file mode 100644
index 0000000000000000000000000000000000000000..48f002ef7e5f0619d249bdcee564d62977161b9f
--- /dev/null
+++ b/src/runtime2/component/component_pdl.rs
@@ -0,0 +1,430 @@
+use crate::protocol::*;
+use crate::protocol::eval::{
+    PortId as EvalPortId, Prompt,
+    ValueGroup, Value,
+    EvalContinuation, EvalResult, EvalError
+};
+
+use crate::runtime2::store::QueueDynMpsc;
+use crate::runtime2::runtime::*;
+use crate::runtime2::scheduler::SchedulerCtx;
+use crate::runtime2::communication::*;
+
+pub enum CompScheduling {
+    Immediate,
+    Requeue,
+    Sleep,
+    Exit,
+}
+
+pub struct CompCtx {
+    pub id: CompId,
+    pub ports: Vec<Port>,
+    pub peers: Vec<Peer>,
+    pub messages: Vec<ValueGroup>, // same size as "ports"
+    pub port_id_counter: u32,
+}
+
+impl Default for CompCtx {
+    fn default() -> Self {
+        return Self{
+            id: CompId(0),
+            ports: Vec::new(),
+            peers: Vec::new(),
+            messages: Vec::new(),
+            port_id_counter: 0,
+        }
+    }
+}
+
+impl CompCtx {
+    fn take_message(&mut self, port_id: PortId) -> Option<ValueGroup> {
+        let port_index = self.get_port_index(port_id).unwrap();
+        let old_value = &mut self.messages[port_index];
+        if old_value.values.is_empty() {
+            return None;
+        }
+
+        // Replace value in array with an empty one
+        let mut message = ValueGroup::new_stack(Vec::new());
+        std::mem::swap(old_value, &mut message);
+        return Some(message);
+    }
+
+    fn find_peer(&self, port_id: PortId) -> (&Port, &Peer) {
+        let port_index = self.get_port_index(port_id).unwrap();
+        let port_info = &self.ports[port_index];
+        let peer_index = self.get_peer_index(port_info.peer_comp_id).unwrap();
+        let peer_info = &self.peers[peer_index];
+        return (port_info, peer_info);
+    }
+
+    fn create_channel(&mut self) -> Channel {
+        let putter_id = PortId(self.take_port_id());
+        let getter_id = PortId(self.take_port_id());
+        self.ports.push(Port{
+            self_id: putter_id,
+            peer_id: getter_id,
+            kind: PortKind::Putter,
+            state: PortState::Open,
+            peer_comp_id: self.id,
+        });
+        self.ports.push(Port{
+            self_id: getter_id,
+            peer_id: putter_id,
+            kind: PortKind::Getter,
+            state: PortState::Open,
+            peer_comp_id: self.id,
+        });
+
+        // Keep the parallel "messages" array the same size as "ports"
+        self.messages.push(ValueGroup::default());
+        self.messages.push(ValueGroup::default());
+
+        return Channel{ putter_id, getter_id };
+    }
+
+    fn get_port_index(&self, port_id: PortId) -> Option<usize> {
+        for (index, port) in self.ports.iter().enumerate() {
+            if port.self_id == port_id {
+                return Some(index);
+            }
+        }
+
+        return None;
+    }
+
+    fn get_peer_index(&self, peer_id: CompId) -> Option<usize> {
+        for (index, peer) in self.peers.iter().enumerate() {
+            if peer.id == peer_id {
+                return Some(index);
+            }
+        }
+
+        return None;
+    }
+
+    fn take_port_id(&mut self) -> u32 {
+        let port_id = self.port_id_counter;
+        self.port_id_counter = self.port_id_counter.wrapping_add(1);
+        return port_id;
+    }
+}
+
+pub enum ExecStmt {
+    CreatedChannel((Value, Value)),
+    PerformedPut,
+    PerformedGet(ValueGroup),
+    None,
+}
+
+impl ExecStmt {
+    fn take(&mut self) -> ExecStmt {
+        let mut value = ExecStmt::None;
+        std::mem::swap(self, &mut value);
+        return value;
+    }
+
+    fn is_none(&self) -> bool {
+        match self {
+            ExecStmt::None => return true,
+            _ => return false,
+        }
+    }
+}
+
+pub struct ExecCtx {
+    stmt: ExecStmt,
+}
+
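+// The scheduler and the PDL evaluator communicate through `ExecCtx`: the
+// scheduler stages a result in `stmt`, and the next evaluator step consumes
+// it through one of the `RunContext` callbacks below. A hypothetical
+// sequence (`msg` and `port` are illustrative, not part of this patch):
+//
+//     exec_ctx.stmt = ExecStmt::PerformedGet(msg); // staged by the scheduler
+//     let v = exec_ctx.performed_get(port);        // evaluator sees Some(msg)
+//     assert!(exec_ctx.stmt.is_none());            // take() reset it to None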
+impl RunContext for ExecCtx {
+    fn performed_put(&mut self, _port: EvalPortId) -> bool {
+        match self.stmt.take() {
+            ExecStmt::None => return false,
+            ExecStmt::PerformedPut => return true,
+            _ => unreachable!(),
+        }
+    }
+
+    fn performed_get(&mut self, _port: EvalPortId) -> Option<ValueGroup> {
+        match self.stmt.take() {
+            ExecStmt::None => return None,
+            ExecStmt::PerformedGet(value) => return Some(value),
+            _ => unreachable!(),
+        }
+    }
+
+    fn fires(&mut self, _port: EvalPortId) -> Option<Value> {
+        todo!("remove fires")
+    }
+
+    fn performed_fork(&mut self) -> Option<bool> {
+        todo!("remove fork")
+    }
+
+    fn created_channel(&mut self) -> Option<(Value, Value)> {
+        match self.stmt.take() {
+            ExecStmt::None => return None,
+            ExecStmt::CreatedChannel(ports) => return Some(ports),
+            _ => unreachable!(),
+        }
+    }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub(crate) enum Mode {
+    NonSync,
+    Sync,
+    BlockedGet,
+    BlockedPut,
+}
+
+pub(crate) struct CompPDL {
+    pub mode: Mode,
+    pub mode_port: PortId, // when blocked on a port
+    pub mode_value: ValueGroup, // when blocked on a put
+    pub prompt: Prompt,
+    pub exec_ctx: ExecCtx,
+}
+
+impl CompPDL {
+    pub(crate) fn new(initial_state: Prompt) -> Self {
+        return Self{
+            mode: Mode::NonSync,
+            mode_port: PortId::new_invalid(),
+            mode_value: ValueGroup::default(),
+            prompt: initial_state,
+            exec_ctx: ExecCtx{
+                stmt: ExecStmt::None,
+            }
+        }
+    }
+
+    pub(crate) fn run(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx) -> Result<CompScheduling, EvalError> {
+        use EvalContinuation as EC;
+
+        let run_result = self.execute_prompt(sched_ctx)?;
+
+        match run_result {
+            EC::Stepping => unreachable!(), // execute_prompt runs until this is no longer returned
+            EC::BranchInconsistent | EC::NewFork | EC::BlockFires(_) => todo!("remove these"),
+            // Results that can be returned in sync mode
+            EC::SyncBlockEnd => {
+                debug_assert_eq!(self.mode, Mode::Sync);
+                self.handle_sync_end(sched_ctx, comp_ctx);
+            },
+            EC::BlockGet(port_id) => {
+                debug_assert_eq!(self.mode, Mode::Sync);
+
+                let port_id = port_id_from_eval(port_id);
+                if let Some(message) = comp_ctx.take_message(port_id) {
+                    // We can immediately receive and continue
+                    debug_assert!(self.exec_ctx.stmt.is_none());
+                    self.exec_ctx.stmt = ExecStmt::PerformedGet(message);
+                    return Ok(CompScheduling::Immediate);
+                } else {
+                    // We need to wait
+                    self.mode = Mode::BlockedGet;
+                    self.mode_port = port_id;
+                    return Ok(CompScheduling::Sleep);
+                }
+            },
+            EC::Put(port_id, value) => {
+                debug_assert_eq!(self.mode, Mode::Sync);
+                let port_id = port_id_from_eval(port_id);
+                Self::send_message_and_wake_up(sched_ctx, comp_ctx, port_id, value);
+
+                // Mark the put as performed so the evaluator can resume past it
+                debug_assert!(self.exec_ctx.stmt.is_none());
+                self.exec_ctx.stmt = ExecStmt::PerformedPut;
+                return Ok(CompScheduling::Immediate);
+            },
+            // Results that can be returned outside of sync mode
+            EC::ComponentTerminated => {
+                debug_assert_eq!(self.mode, Mode::NonSync);
+                return Ok(CompScheduling::Exit);
+            },
+            EC::SyncBlockStart => {
+                debug_assert_eq!(self.mode, Mode::NonSync);
+                self.handle_sync_start(sched_ctx, comp_ctx);
+            },
+            EC::NewComponent(_definition_id, _monomorph_idx, _arguments) => {
+                debug_assert_eq!(self.mode, Mode::NonSync);
+                // TODO: instantiate the component and transfer the ports
+                // found in the arguments (see
+                // create_component_and_transfer_ports below)
+            },
+            EC::NewChannel => {
+                debug_assert_eq!(self.mode, Mode::NonSync);
+                debug_assert!(self.exec_ctx.stmt.is_none());
+                let channel = comp_ctx.create_channel();
+                self.exec_ctx.stmt = ExecStmt::CreatedChannel((
+                    Value::Output(port_id_to_eval(channel.putter_id)),
+                    Value::Input(port_id_to_eval(channel.getter_id))
+                ));
+                return Ok(CompScheduling::Immediate);
+            }
+        }
+
+        return Ok(CompScheduling::Sleep);
+    }
+
+    fn execute_prompt(&mut self, sched_ctx: &SchedulerCtx) -> EvalResult {
+        let mut step_result = EvalContinuation::Stepping;
+        while let EvalContinuation::Stepping = step_result {
+            step_result = self.prompt.step(
+                &sched_ctx.runtime.protocol.types, &sched_ctx.runtime.protocol.heap,
+                &sched_ctx.runtime.protocol.modules, &mut self.exec_ctx,
+            )?;
+        }
+
+        return Ok(step_result);
+    }
+
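+    // Intended caller contract for `run`, as a sketch (scheduler-side
+    // pseudocode; `queue` and `key` are illustrative, not part of this patch):
+    //
+    //     loop {
+    //         match comp.run(sched_ctx, &mut comp_ctx)? {
+    //             CompScheduling::Immediate => continue,              // keep running now
+    //             CompScheduling::Requeue => { queue.push(key); break; },
+    //             CompScheduling::Sleep => break,                     // a message wakes us later
+    //             CompScheduling::Exit => { /* destroy */ break; },
+    //         }
+    //     }
+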
+    fn handle_sync_start(&mut self, _sched_ctx: &SchedulerCtx, _comp_ctx: &mut CompCtx) {
+        // Entering a synchronous round; the remaining bookkeeping is TODO
+        self.mode = Mode::Sync;
+    }
+
+    fn handle_sync_end(&mut self, _sched_ctx: &SchedulerCtx, _comp_ctx: &mut CompCtx) {
+        // Leaving the synchronous round; the remaining bookkeeping is TODO
+        self.mode = Mode::NonSync;
+    }
+
+    fn send_message_and_wake_up(sched_ctx: &SchedulerCtx, comp_ctx: &CompCtx, port_id: PortId, value: ValueGroup) {
+        use std::sync::atomic::Ordering;
+
+        let (port_info, peer_info) = comp_ctx.find_peer(port_id);
+        peer_info.handle.inbox.push(Message::Data(DataMessage{
+            source_port_id: port_id,
+            target_port_id: port_info.peer_id,
+            content: value,
+        }));
+
+        // If the peer marked itself as sleeping, we are responsible for
+        // scheduling it again so it handles the message we just sent.
+        let should_wake_up = peer_info.handle.sleeping.compare_exchange(
+            true, false, Ordering::AcqRel, Ordering::Relaxed
+        ).is_ok();
+
+        if should_wake_up {
+            let comp_key = unsafe{ peer_info.id.upgrade() };
+            sched_ctx.runtime.enqueue_work(comp_key);
+        }
+    }
+
+    fn create_component_and_transfer_ports(sched_ctx: &SchedulerCtx, creator_ctx: &mut CompCtx, prompt: Prompt, ports: &[PortId]) {
+        let component = CompPDL::new(prompt);
+        let (comp_key, component) = sched_ctx.runtime.create_pdl_component(component, true);
+        let created_ctx = &mut component.ctx;
+
+        for port_id in ports.iter().copied() {
+            // Transfer port
+            let (port_info, peer_info) = Self::remove_port_from_component(creator_ctx, port_id);
+            Self::add_port_to_component(sched_ctx, created_ctx, port_info);
+
+            // Maybe remove peer from the creator
+            if let Some(peer_info) = peer_info {
+                let remove_from_runtime = peer_info.handle.decrement_users();
+                if remove_from_runtime {
+                    let removed_comp_key = unsafe{ peer_info.id.upgrade() };
+                    sched_ctx.runtime.destroy_component(removed_comp_key);
+                }
+            }
+        }
+
+        // Start scheduling
+        sched_ctx.runtime.enqueue_work(comp_key);
+    }
+
+    /// Removes a port from a component. Also decrements the port counter in
+    /// the peer component's entry. If that hits 0 then the entry is removed
+    /// and returned. If returned then the caller is responsible for
+    /// decrementing the atomic counters of the peer component's handle.
+    fn remove_port_from_component(comp_ctx: &mut CompCtx, port_id: PortId) -> (Port, Option<Peer>) {
+        let port_index = comp_ctx.get_port_index(port_id).unwrap();
+        let port_info = comp_ctx.ports.remove(port_index);
+
+        // Keep "messages" aligned with "ports"; a port should not be
+        // transferred while it still has an unconsumed message.
+        debug_assert!(comp_ctx.messages[port_index].values.is_empty());
+        comp_ctx.messages.remove(port_index);
+
+        // If the component owns the peer, then we don't have to decrement the
+        // number of peers (because we don't have an entry for ourselves)
+        if port_info.peer_comp_id == comp_ctx.id {
+            return (port_info, None);
+        }
+
+        let peer_index = comp_ctx.get_peer_index(port_info.peer_comp_id).unwrap();
+        let peer_info = &mut comp_ctx.peers[peer_index];
+        peer_info.num_associated_ports -= 1;
+
+        // Check if we still have other ports referencing this peer
+        if peer_info.num_associated_ports != 0 {
+            return (port_info, None);
+        }
+
+        let peer_info = comp_ctx.peers.remove(peer_index);
+        return (port_info, Some(peer_info));
+    }
+
+    fn add_port_to_component(sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, port_info: Port) {
+        // Add the port info, with an empty slot in the parallel "messages"
+        // array
+        let peer_comp_id = port_info.peer_comp_id;
+        debug_assert!(!comp_ctx.ports.iter().any(|v| v.self_id == port_info.self_id));
+        comp_ctx.ports.push(port_info);
+        comp_ctx.messages.push(ValueGroup::default());
+
+        // Increment counters on peer, or create entry for peer if it doesn't
+        // exist yet.
+        match comp_ctx.peers.iter().position(|v| v.id == peer_comp_id) {
+            Some(peer_index) => {
+                let peer_info = &mut comp_ctx.peers[peer_index];
+                peer_info.num_associated_ports += 1;
+            },
+            None => {
+                let handle = sched_ctx.runtime.get_component_public(peer_comp_id);
+                handle.increment_users();
+                comp_ctx.peers.push(Peer{
+                    id: peer_comp_id,
+                    num_associated_ports: 1,
+                    handle,
+                });
+            }
+        }
+    }
+}
+
+#[inline]
+fn port_id_from_eval(port_id: EvalPortId) -> PortId {
+    return PortId(port_id.id);
+}
+
+#[inline]
+fn port_id_to_eval(port_id: PortId) -> EvalPortId {
+    return EvalPortId{ id: port_id.0 };
+}
+
+/// Recursively goes through the value group, attempting to find ports.
+/// Duplicates will only be added once.
+pub(crate) fn find_ports_in_value_group(value_group: &ValueGroup, ports: &mut Vec<PortId>) {
+    // Helper to check a value for a port and recurse if needed.
+    use crate::protocol::eval::Value;
+
+    fn find_port_in_value(group: &ValueGroup, value: &Value, ports: &mut Vec<PortId>) {
+        match value {
+            Value::Input(port_id) | Value::Output(port_id) => {
+                // This is an actual port
+                let cur_port = PortId(port_id.id);
+                for prev_port in ports.iter() {
+                    if *prev_port == cur_port {
+                        // Already added
+                        return;
+                    }
+                }
+
+                ports.push(cur_port);
+            },
+            Value::Array(heap_pos) |
+            Value::Message(heap_pos) |
+            Value::String(heap_pos) |
+            Value::Struct(heap_pos) |
+            Value::Union(_, heap_pos) => {
+                // Reference to some dynamic thing which might contain ports,
+                // so recurse
+                let heap_region = &group.regions[*heap_pos as usize];
+                for embedded_value in heap_region {
+                    find_port_in_value(group, embedded_value, ports);
+                }
+            },
+            _ => {}, // values we don't care about
+        }
+    }
+
+    // Clear the ports, then scan all the available values
+    ports.clear();
+    for value in &value_group.values {
+        find_port_in_value(value_group, value, ports);
+    }
+}
\ No newline at end of file
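
A minimal sketch of how `find_ports_in_value_group` is expected to behave,
using only items introduced in this patch (the port ids are illustrative):

    let group = ValueGroup::new_stack(vec![
        Value::Output(EvalPortId{ id: 0 }),
        Value::Input(EvalPortId{ id: 1 }),
        Value::Output(EvalPortId{ id: 0 }), // duplicate: reported only once
    ]);
    let mut ports = Vec::new();
    find_ports_in_value_group(&group, &mut ports);
    assert!(ports == vec![PortId(0), PortId(1)]);

The scan is non-incremental by design: the function clears `ports` before
walking the group, so callers always get the complete set for that group.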