Changeset - 432fcde4e554
[Not reviewed]
MH - 2022-05-15 14:41:30
contact@maxhenger.nl
Fix async component creation error in tcp listener
6 files changed with 70 insertions and 25 deletions:
0 comments (0 inline, 0 general)
src/runtime2/component/component.rs
 
@@ -1102,492 +1102,490 @@ pub(crate) fn perform_create_component(
 
            let port_info = created_ctx.get_port(pair.created_handle);
 
            if pair.is_open && port_info.peer_comp_id != instantiator_ctx.id && port_info.peer_comp_id != created_ctx.id {
 
                // Peer component is not the instantiator, and it is not the
 
                // new component itself
 
                let message = control.add_reroute_entry(
 
                    instantiator_ctx.id, port_info.peer_port_id, port_info.peer_comp_id,
 
                    pair.instantiator_id, pair.created_id, created_ctx.id,
 
                    schedule_entry_id
 
                );
 
                let peer_handle = created_ctx.get_peer_handle(port_info.peer_comp_id);
 
                let peer_info = created_ctx.get_peer(peer_handle);
 

	
 
                peer_info.handle.send_message_logged(sched_ctx, message, true);
 
            }
 
        }
 
    } else {
 
        // We can schedule the component immediately, we do not have to wait
 
        // for any peers: there are none.
 
        sched_ctx.runtime.enqueue_work(created_key);
 
    }
 

	
 
    exec_state.mode = CompMode::NonSync;
 
    exec_state.mode_component = (ProcedureDefinitionId::new_invalid(), TypeId::new_invalid());
 
}
 

	
 
#[inline]
 
pub(crate) fn default_handle_exit(_exec_state: &CompExecState) -> CompScheduling {
 
    debug_assert_eq!(_exec_state.mode, CompMode::Exit);
 
    return CompScheduling::Exit;
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Internal messaging/state utilities
 
// -----------------------------------------------------------------------------
 

	
 
struct PortPair {
 
    instantiator_id: PortId,
 
    instantiator_handle: LocalPortHandle,
 
    created_id: PortId,
 
    created_handle: LocalPortHandle,
 
    is_open: bool,
 
}
 

	
 
pub(crate) fn ports_not_blocked(comp_ctx: &CompCtx, ports: &EncounteredPorts) -> bool {
 
    for (_port_locations, port_id) in ports {
 
        let port_handle = comp_ctx.get_port_handle(*port_id);
 
        let port_info = comp_ctx.get_port(port_handle);
 

	
 
        if port_info.state.is_blocked_due_to_port_change() {
 
            return false;
 
        }
 
    }
 

	
 
    return true;
 
}
 

	
 
fn transfer_messages(
 
    inbox_main: &mut InboxMain, inbox_backup: &mut InboxBackup, port_pair: &PortPair,
 
    instantiator_ctx: &mut CompCtx, created_ctx: &mut CompCtx, created_component: &mut dyn Component
 
) {
 
    let instantiator_port_index = instantiator_ctx.get_port_index(port_pair.instantiator_handle);
 
    if let Some(mut message) = inbox_main.remove(instantiator_port_index) {
 
        message.data_header.target_port = port_pair.created_id;
 
        created_component.adopt_message(created_ctx, message);
 
    }
 

	
 
    let mut message_index = 0;
 
    while message_index < inbox_backup.len() {
 
        let message = &inbox_backup[message_index];
 
        if message.data_header.target_port == port_pair.instantiator_id {
 
            // Transfer the message
 
            let mut message = inbox_backup.remove(message_index);
 
            message.data_header.target_port = port_pair.created_id;
 
            created_component.adopt_message(created_ctx, message);
 
        } else {
 
            // Message does not belong to the port pair that we're
 
            // transferring to the new component.
 
            message_index += 1;
 
        }
 
    }
 
}
 

	
 
/// Performs the default action of printing the provided error, and then putting
 
/// the component in the state where it will shut down. Only to be used for
 
/// builtin components: their error message construction is simpler (and more
 
/// common) as they don't have any source code.
 
pub(crate) fn default_handle_error_for_builtin(
 
    exec_state: &mut CompExecState, sched_ctx: &SchedulerCtx,
 
    location_and_message: (PortInstruction, String)
 
) {
 
    let (_location, message) = location_and_message;
 
    sched_ctx.error(&message);
 

	
 
    let exit_reason = if exec_state.mode.is_in_sync_block() {
 
        ExitReason::ErrorInSync
 
    } else {
 
        ExitReason::ErrorNonSync
 
    };
 

	
 
    exec_state.set_as_start_exit(exit_reason);
 
}
 

	
 
/// Sends a message without any transmitted ports. Does not check if sending
 
/// is actually valid.
 
fn send_message_without_ports(
 
    sending_port_handle: LocalPortHandle, value: ValueGroup,
 
    comp_ctx: &CompCtx, sched_ctx: &SchedulerCtx, consensus: &mut Consensus,
 
) {
 
    let port_info = comp_ctx.get_port(sending_port_handle);
 
    debug_assert!(port_info.state.can_send());
 
    let peer_handle = comp_ctx.get_peer_handle(port_info.peer_comp_id);
 
    let peer_info = comp_ctx.get_peer(peer_handle);
 

	
 
    let annotated_message = consensus.annotate_data_message(comp_ctx, port_info, value);
 
    peer_info.handle.send_message_logged(sched_ctx, Message::Data(annotated_message), true);
 
}
 

	
 
/// Prepares sending a message that contains ports. Only once a particular
 
/// protocol has completed (where we notify all the peers that the ports will
 
/// be transferred) will we actually send the message to the recipient.
 
fn start_send_message_with_ports(
 
    sending_port_id: PortId, sending_port_instruction: PortInstruction, value: ValueGroup,
 
    exec_state: &mut CompExecState, comp_ctx: &mut CompCtx, sched_ctx: &SchedulerCtx,
 
    control: &mut ControlLayer
 
) -> Result<(), (PortInstruction, String)> {
 
    debug_assert_eq!(exec_state.mode, CompMode::Sync); // busy in sync, trying to send
 

	
 
    // Retrieve ports we're going to transfer
 
    let sending_port_handle = comp_ctx.get_port_handle(sending_port_id);
 
    let sending_port_info = comp_ctx.get_port_mut(sending_port_handle);
 
    sending_port_info.last_instruction = sending_port_instruction;
 

	
 
    let mut transmit_ports = Vec::new();
 
    find_ports_in_value_group(&value, &mut transmit_ports);
 
    debug_assert!(!transmit_ports.is_empty()); // required from caller
 

	
 
    // Enter the state where we'll wait until all transferred ports are not
 
    // blocked.
 
    exec_state.set_as_blocked_put_with_ports(sending_port_id, value);
 

	
 
    if ports_not_blocked(comp_ctx, &transmit_ports) {
 
        // Ports are not blocked, so we can send them right away.
 
        perform_send_message_with_ports_notify_peers(
 
            exec_state, comp_ctx, sched_ctx, control, transmit_ports
 
        )?;
 
    } // else: wait until they become unblocked
 

	
 
    return Ok(())
 
}
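// For reference, the successful path through the functions above and below
// walks the execution state through these modes:
//
//   Sync
//     -> PutPortsBlockedTransferredPorts  (start_send_message_with_ports)
//     -> PutPortsBlockedAwaitingAcks      (perform_send_message_with_ports_notify_peers)
//     -> PutPortsBlockedSendingPort       (final Ack, see default_handle_ack)
//     -> Sync                             (perform_send_message_with_ports_to_receiver)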
 

	
 
fn perform_send_message_with_ports_notify_peers(
 
    exec_state: &mut CompExecState, comp_ctx: &mut CompCtx, sched_ctx: &SchedulerCtx,
 
    control: &mut ControlLayer, transmit_ports: EncounteredPorts
 
) -> Result<(), (PortInstruction, String)> {
 
    // Check we're in the correct state in debug mode
 
    debug_assert_eq!(exec_state.mode, CompMode::PutPortsBlockedTransferredPorts);
 
    debug_assert!(ports_not_blocked(comp_ctx, &transmit_ports));
 

	
 
    // Set up the final Ack that triggers us to send our final message
 
    let unblock_put_control_id = control.add_unblock_put_with_ports_entry();
 
    for (_, port_id) in &transmit_ports {
 
        let transmit_port_handle = comp_ctx.get_port_handle(*port_id);
 
        let transmit_port_info = comp_ctx.get_port_mut(transmit_port_handle);
 
        let peer_comp_id = transmit_port_info.peer_comp_id;
 
        let peer_port_id = transmit_port_info.peer_port_id;
 

	
 

	
 
        // Note: we checked earlier that we are currently in sync mode. Now we
 
        // will check if we've already used the port we're about to transmit.
 
        if !transmit_port_info.last_instruction.is_none() {
 
            let sending_port_handle = comp_ctx.get_port_handle(exec_state.mode_port);
 
            let sending_port_instruction = comp_ctx.get_port(sending_port_handle).last_instruction;
 
            return Err((
 
                sending_port_instruction,
 
                String::from("Cannot transmit one of the ports in this message, as it is used in this sync round")
 
            ));
 
        }
 

	
 
        if transmit_port_info.state.is_set(PortStateFlag::Transmitted) {
 
            let sending_port_handle = comp_ctx.get_port_handle(exec_state.mode_port);
 
            let sending_port_instruction = comp_ctx.get_port(sending_port_handle).last_instruction;
 
            return Err((
 
                sending_port_instruction,
 
                String::from("Cannot transmit one of the ports in this message, as that port is already transmitted")
 
            ));
 
        }
 

	
 
        // Set the flag for transmission
 
        transmit_port_info.state.set(PortStateFlag::Transmitted);
 

	
 
        // Block the peer of the port
 
        let message = control.create_port_transfer_message(unblock_put_control_id, comp_ctx.id, peer_port_id);
 
        println!("DEBUG: Port transfer message\nControl ID: {:?}\nMessage: {:?}", unblock_put_control_id, message);
 
        let peer_handle = comp_ctx.get_peer_handle(peer_comp_id);
 
        let peer_info = comp_ctx.get_peer(peer_handle);
 

	
 
        peer_info.handle.send_message_logged(sched_ctx, message, true);
 
    }
 

	
 
    // We've set up the protocol. Once all the peers have sent their `Ack`s we are
 
    // supposed to transfer the message to the recipient, so store it temporarily.
 
    exec_state.mode = CompMode::PutPortsBlockedAwaitingAcks;
 

	
 
    return Ok(());
 
}
 

	
 
/// Performs the transmission of a data message that contains ports. These were
 
/// all stored in the component's execution state by the
 
/// `prepare_send_message_with_ports` function. Port must be ready to send!
 
fn perform_send_message_with_ports_to_receiver(
 
    exec_state: &mut CompExecState, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, consensus: &mut Consensus,
 
    inbox_main: &mut InboxMain, inbox_backup: &mut InboxBackup
 
) -> Result<(), (PortInstruction, String)> {
 
    debug_assert_eq!(exec_state.mode, CompMode::PutPortsBlockedSendingPort);
 

	
 
    // Find all ports again
 
    let mut transmit_ports = Vec::new();
 
    find_ports_in_value_group(&exec_state.mode_value, &mut transmit_ports);
 

	
 
    // Retrieve the port over which we're going to send the message
 
    let port_handle = comp_ctx.get_port_handle(exec_state.mode_port);
 
    let port_info = comp_ctx.get_port(port_handle);
 

	
 
    if !port_info.state.is_open() {
 
        return Err((
 
            port_info.last_instruction,
 
            String::from("cannot send over this port, as it is closed")
 
        ));
 
    }
 

	
 
    debug_assert!(!port_info.state.is_blocked_due_to_port_change()); // caller should have checked this
 
    let peer_handle = comp_ctx.get_peer_handle(port_info.peer_comp_id);
 

	
 
    // Change state back to its default
 
    exec_state.mode = CompMode::Sync;
 
    let message_value = exec_state.mode_value.take();
 
    exec_state.mode_port = PortId::new_invalid();
 

	
 
    // Annotate the data message
 
    let mut annotated_message = consensus.annotate_data_message(comp_ctx, port_info, message_value);
 

	
 
    // And further enhance the message by adding data about the ports that are
 
    // being transferred
 
    for (port_locations, transmit_port_id) in transmit_ports {
 
        let transmit_port_handle = comp_ctx.get_port_handle(transmit_port_id);
 
        let transmit_port_info = comp_ctx.get_port(transmit_port_handle);
 

	
 
        let transmit_messages = take_port_messages(comp_ctx, transmit_port_id, inbox_main, inbox_backup);
 

	
 
        let mut transmit_port_state = transmit_port_info.state;
 
        debug_assert!(transmit_port_state.is_set(PortStateFlag::Transmitted));
 
        transmit_port_state.clear(PortStateFlag::Transmitted);
 

	
 
        annotated_message.ports.push(TransmittedPort{
 
            locations: port_locations,
 
            messages: transmit_messages,
 
            peer_comp: transmit_port_info.peer_comp_id,
 
            peer_port: transmit_port_info.peer_port_id,
 
            kind: transmit_port_info.kind,
 
            state: transmit_port_state
 
        });
 

	
 
        comp_ctx.change_port_peer(sched_ctx, transmit_port_handle, None);
 
    }
 

	
 
    // And finally, send the message to the peer
 
    let peer_info = comp_ctx.get_peer(peer_handle);
 
    peer_info.handle.send_message_logged(sched_ctx, Message::Data(annotated_message), true);
 

	
 
    return Ok(());
 
}
 

	
 
/// Handles an `Ack` for the control layer.
 
fn default_handle_ack(
 
    exec_state: &mut CompExecState, control: &mut ControlLayer, control_id: ControlId,
 
    sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, consensus: &mut Consensus,
 
    inbox_main: &mut InboxMain, inbox_backup: &mut InboxBackup
 
) -> Result<(), (PortInstruction, String)>{
 
    // Since an `Ack` may cause another one, handle them in a loop
 
    let mut to_ack = control_id;
 

	
 
    loop {
 
        let (action, new_to_ack) = control.handle_ack(to_ack, sched_ctx, comp_ctx);
 
        match action {
 
            AckAction::SendMessage(target_comp, message) => {
 
                // FIX @NoDirectHandle
 
                let mut handle = sched_ctx.runtime.get_component_public(target_comp);
 
                handle.send_message_logged(sched_ctx, Message::Control(message), true);
 
                let _should_remove = handle.decrement_users();
 
                debug_assert!(_should_remove.is_none());
 
            },
 
            AckAction::ScheduleComponent(to_schedule) => {
 
                // FIX @NoDirectHandle
 
                let mut handle = sched_ctx.runtime.get_component_public(to_schedule);
 

	
 
                // Note that the component is intentionally not
 
                // sleeping, so we just wake it up
 
                debug_assert!(!handle.sleeping.load(std::sync::atomic::Ordering::Acquire));
 
                let key = unsafe { to_schedule.upgrade() };
 
                sched_ctx.runtime.enqueue_work(key);
 
                let _should_remove = handle.decrement_users();
 
                debug_assert!(_should_remove.is_none());
 
            },
 
            AckAction::UnblockPutWithPorts => {
 
                // Send the message (containing ports) stored in the component
 
                // execution state to the recipient
 
                println!("DEBUG: Unblocking put with ports");
 
                debug_assert_eq!(exec_state.mode, CompMode::PutPortsBlockedAwaitingAcks);
 
                exec_state.mode = CompMode::PutPortsBlockedSendingPort;
 
                let port_handle = comp_ctx.get_port_handle(exec_state.mode_port);
 

	
 
                // Little bit of a hack, we didn't really unblock the sending
 
                // port, but this will mesh nicely with waiting for the sending
 
                // port to become unblocked.
 
                default_handle_recently_unblocked_port(
 
                    exec_state, control, consensus, port_handle, sched_ctx,
 
                    comp_ctx, inbox_main, inbox_backup
 
                )?;
 
            },
 
            AckAction::None => {}
 
        }
 

	
 
        match new_to_ack {
 
            Some(new_to_ack) => to_ack = new_to_ack,
 
            None => break,
 
        }
 
    }
 

	
 
    return Ok(());
 
}
 

	
 
/// Little helper for sending the most common kind of `Ack`
 
fn default_send_ack(
 
    causer_of_ack_id: ControlId, peer_handle: LocalPeerHandle,
 
    sched_ctx: &SchedulerCtx, comp_ctx: &CompCtx
 
) {
 
    let peer_info = comp_ctx.get_peer(peer_handle);
 
    peer_info.handle.send_message_logged(sched_ctx, Message::Control(ControlMessage{
 
        id: causer_of_ack_id,
 
        sender_comp_id: comp_ctx.id,
 
        target_port_id: None,
 
        content: ControlMessageContent::Ack
 
    }), true);
 
}
 

	
 
/// Handles the unblocking of a putter port. In case there is a pending message
 
/// on that port then it will be sent. There are two reasons for calling this
 
/// function: either a port was blocked (i.e. the Blocked state flag was
 
/// cleared), or the component is ready to send a message containing ports
 
/// (stored in the execution state). In this latter case we might still have
 
/// a blocked port.
 
fn default_handle_recently_unblocked_port(
 
    exec_state: &mut CompExecState, control: &mut ControlLayer, consensus: &mut Consensus,
 
    port_handle: LocalPortHandle, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx,
 
    inbox_main: &mut InboxMain, inbox_backup: &mut InboxBackup
 
) -> Result<(), (PortInstruction, String)> {
 
    let port_info = comp_ctx.get_port_mut(port_handle);
 
    let port_id = port_info.self_id;
 

	
 
    if port_info.state.is_blocked() {
 
        // Port is still blocked. We wait until the next control message where
 
        // we unblock the port.
 
        return Ok(());
 
    }
 

	
 
    if exec_state.is_blocked_on_put_without_ports(port_id) {
 
        // Annotate the message that we're going to send
 
        let port_info = comp_ctx.get_port(port_handle); // for immutable access
 
        debug_assert_eq!(port_info.kind, PortKind::Putter);
 
        let to_send = exec_state.mode_value.take();
 
        let to_send = consensus.annotate_data_message(comp_ctx, port_info, to_send);
 

	
 
        // Retrieve peer to send the message
 
        let peer_handle = comp_ctx.get_peer_handle(port_info.peer_comp_id);
 
        let peer_info = comp_ctx.get_peer(peer_handle);
 
        peer_info.handle.send_message_logged(sched_ctx, Message::Data(to_send), true);
 

	
 
        // Return to the regular execution mode
 
        exec_state.mode = CompMode::Sync;
 
        exec_state.mode_port = PortId::new_invalid();
 
    } else if exec_state.mode == CompMode::PutPortsBlockedTransferredPorts {
 
        // We are waiting until all of the transferred ports become unblocked,
 
        // so check that here.
 
        let mut transfer_ports = Vec::new();
 
        find_ports_in_value_group(&exec_state.mode_value, &mut transfer_ports);
 
        if ports_not_blocked(comp_ctx, &transfer_ports) {
 
            perform_send_message_with_ports_notify_peers(
 
                exec_state, comp_ctx, sched_ctx, control, transfer_ports
 
            )?;
 
        }
 
    } else if exec_state.mode == CompMode::PutPortsBlockedSendingPort && exec_state.mode_port == port_id {
 
        // We checked above that the port became unblocked, so we can send the
 
        // message
 
        perform_send_message_with_ports_to_receiver(
 
            exec_state, sched_ctx, comp_ctx, consensus, inbox_main, inbox_backup
 
        )?;
 
    } else if exec_state.is_blocked_on_create_component() {
 
        let mut ports = Vec::new();
 
        find_ports_in_value_group(&exec_state.mode_value, &mut ports);
 
        if ports_not_blocked(comp_ctx, &ports) {
 
            perform_create_component(
 
                exec_state, sched_ctx, comp_ctx, control, inbox_main, inbox_backup
 
            );
 
        }
 
    }
 

	
 
    return Ok(());
 
}
 

	
 
#[inline]
 
pub(crate) fn port_id_from_eval(port_id: EvalPortId) -> PortId {
 
    return PortId(port_id.id);
 
}
 

	
 
#[inline]
 
pub(crate) fn port_id_to_eval(port_id: PortId) -> EvalPortId {
 
    return EvalPortId{ id: port_id.0 };
 
}
 

	
 
// TODO: Optimize double vec
 
type EncounteredPorts = Vec<(Vec<ValueId>, PortId)>;
 

	
 
/// Recursively goes through the value group, attempting to find ports.
 
/// Duplicates will only be added once.
 
pub(crate) fn find_ports_in_value_group(value_group: &ValueGroup, ports: &mut EncounteredPorts) {
 
    // Helper to check a value for a port and recurse if needed.
 
    fn find_port_in_value(group: &ValueGroup, value: &Value, value_location: ValueId, ports: &mut EncounteredPorts) {
 
        match value {
 
            Value::Input(port_id) | Value::Output(port_id) => {
 
                // This is an actual port
 
                let cur_port = PortId(port_id.id);
 
                for prev_port in ports.iter_mut() {
 
                    if prev_port.1 == cur_port {
 
                        // Already added
 
                        prev_port.0.push(value_location);
 
                        return;
 
                    }
 
                }
 

	
 
                ports.push((vec![value_location], cur_port));
 
            },
 
            Value::Array(heap_pos) |
 
            Value::Message(heap_pos) |
 
            Value::String(heap_pos) |
 
            Value::Struct(heap_pos) |
 
            Value::Union(_, heap_pos) => {
 
                // Reference to some dynamic thing which might contain ports,
 
                // so recurse
 
                let heap_region = &group.regions[*heap_pos as usize];
 
                for (value_index, embedded_value) in heap_region.iter().enumerate() {
 
                    let value_location = ValueId::Heap(*heap_pos, value_index as u32);
 
                    find_port_in_value(group, embedded_value, value_location, ports);
 
                }
 
            },
 
            _ => {}, // values we don't care about
 
        }
 
    }
 

	
 
    // Clear the ports, then scan all the available values
 
    ports.clear();
 
    for (value_index, value) in value_group.values.iter().enumerate() {
 
        find_port_in_value(value_group, value, ValueId::Stack(value_index as u32), ports);
 
    }
 
}
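// A minimal usage sketch (illustration only, the helper name is made up): a
// port that occurs multiple times in the value group is reported once, with
// every location it occurs at collected in the first tuple element.
#[allow(dead_code)]
fn example_count_port_occurrences(group: &ValueGroup) -> usize {
    let mut ports: EncounteredPorts = Vec::new();
    find_ports_in_value_group(group, &mut ports);
    // Summing the location lists gives the total number of port occurrences.
    ports.iter().map(|(locations, _port_id)| locations.len()).sum()
}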
 

	
 
/// Goes through the inbox of a component and takes out all the messages that
 
/// are targeted at a specific port
 
pub(crate) fn take_port_messages(
 
    comp_ctx: &CompCtx, port_id: PortId,
 
    inbox_main: &mut InboxMain, inbox_backup: &mut InboxBackup
 
) -> Vec<DataMessage> {
 
    let mut messages = Vec::new();
 
    let port_handle = comp_ctx.get_port_handle(port_id);
 
    let port_index = comp_ctx.get_port_index(port_handle);
 

	
 
    if let Some(message) = inbox_main[port_index].take() {
 
        messages.push(message);
 
    }
 

	
 
    let mut message_index = 0;
 
    while message_index < inbox_backup.len() {
 
        let message = &inbox_backup[message_index];
 
        if message.data_header.target_port == port_id {
 
            let message = inbox_backup.remove(message_index);
 
            messages.push(message);
 
        } else {
 
            message_index += 1;
 
        }
 
    }
 

	
 
    return messages;
 
}
 
\ No newline at end of file
src/runtime2/component/component_internet.rs
 
@@ -110,644 +110,667 @@ impl Component for ComponentTcpClient {
 
        match message {
 
            Message::Data(message) => {
 
                self.handle_incoming_data_message(sched_ctx, comp_ctx, message);
 
            },
 
            Message::Sync(message) => {
 
                let decision = self.consensus.receive_sync_message(sched_ctx, comp_ctx, message);
 
                component::default_handle_sync_decision(sched_ctx, &mut self.exec_state, comp_ctx, decision, &mut self.consensus);
 
            },
 
            Message::Control(message) => {
 
                if let Err(location_and_message) = component::default_handle_control_message(
 
                    &mut self.exec_state, &mut self.control, &mut self.consensus,
 
                    message, sched_ctx, comp_ctx, &mut self.inbox_main, &mut self.inbox_backup
 
                ) {
 
                    component::default_handle_error_for_builtin(&mut self.exec_state, sched_ctx, location_and_message);
 
                }
 
            },
 
            Message::Poll => {
 
                sched_ctx.info("Received polling event");
 
            },
 
        }
 
    }
 

	
 
    fn run(&mut self, sched_ctx: &mut SchedulerCtx, comp_ctx: &mut CompCtx) -> CompScheduling {
 
        sched_ctx.info(&format!("Running component ComponentTcpClient (mode: {:?}, sync state: {:?})", self.exec_state.mode, self.sync_state));
 

	
 
        match self.exec_state.mode {
 
            CompMode::BlockedSelect |
 
            CompMode::PutPortsBlockedTransferredPorts |
 
            CompMode::PutPortsBlockedAwaitingAcks |
 
            CompMode::PutPortsBlockedSendingPort |
 
            CompMode::NewComponentBlocked => {
 
                // Not possible: we never enter this state
 
                unreachable!();
 
            },
 
            CompMode::NonSync => {
 
                // When in non-sync mode
 
                match &self.socket_state {
 
                    ClientSocketState::Connected(_socket) => {
 
                        if self.sync_state == ClientSyncState::FinishSyncThenQuit {
 
                            // Previous request was to let the component shut down
 
                            self.exec_state.set_as_start_exit(ExitReason::Termination);
 
                        } else {
 
                            // Reset for a new request
 
                            self.sync_state = ClientSyncState::AwaitingCmd;
 
                            component::default_handle_sync_start(
 
                                &mut self.exec_state, &mut self.inbox_main, sched_ctx, comp_ctx, &mut self.consensus
 
                            );
 
                        }
 
                        return CompScheduling::Immediate;
 
                    },
 
                    ClientSocketState::Error => {
 
                        // Could potentially send an error message to the
 
                        // connected component.
 
                        self.exec_state.set_as_start_exit(ExitReason::ErrorNonSync);
 
                        return CompScheduling::Immediate;
 
                    }
 
                }
 
            },
 
            CompMode::Sync => {
 
                // When in sync mode: wait for a command to come in
 
                match self.sync_state {
 
                    ClientSyncState::AwaitingCmd => {
 
                        match component::default_attempt_get(
 
                            &mut self.exec_state, self.pdl_input_port_id, PortInstruction::NoSource,
 
                            &mut self.inbox_main, &mut self.inbox_backup, sched_ctx, comp_ctx,
 
                            &mut self.control, &mut self.consensus
 
                        ) {
 
                            GetResult::Received(message) => {
 
                                let (tag_value, embedded_heap_pos) = message.content.values[0].as_union();
 
                                if tag_value == self.input_union_send_tag_value {
 
                                    // Retrieve bytes from the message
 
                                    self.byte_buffer.clear();
 
                                    let union_content = &message.content.regions[embedded_heap_pos as usize];
 
                                    debug_assert_eq!(union_content.len(), 1);
 
                                    let array_heap_pos = union_content[0].as_array();
 
                                    let array_values = &message.content.regions[array_heap_pos as usize];
 
                                    self.byte_buffer.reserve(array_values.len());
 
                                    for value in array_values {
 
                                        self.byte_buffer.push(value.as_uint8());
 
                                    }
 

	
 
                                    self.sync_state = ClientSyncState::Putting;
 
                                } else if tag_value == self.input_union_receive_tag_value {
 
                                    // Component requires a `recv`
 
                                    self.sync_state = ClientSyncState::Getting;
 
                                } else if tag_value == self.input_union_finish_tag_value {
 
                                    // Component requires us to end the sync round
 
                                    self.sync_state = ClientSyncState::FinishSync;
 
                                } else if tag_value == self.input_union_shutdown_tag_value {
 
                                    // Component wants to close the connection
 
                                    self.sync_state = ClientSyncState::FinishSyncThenQuit;
 
                                } else {
 
                                    unreachable!("got tag_value {}", tag_value)
 
                                }
 

	
 
                                return CompScheduling::Immediate;
 
                            },
 
                            GetResult::NoMessage => {
 
                                return CompScheduling::Sleep;
 
                            },
 
                            GetResult::Error(location_and_message) => {
 
                                component::default_handle_error_for_builtin(&mut self.exec_state, sched_ctx, location_and_message);
 
                                return CompScheduling::Immediate;
 
                            }
 
                        }
 
                    },
 
                    ClientSyncState::Putting => {
 
                        // We're supposed to send a user-supplied message fully
 
                        // over the socket. But we might end up blocking. In
 
                        // that case the component goes to sleep until it is
 
                        // polled.
 
                        let socket = self.socket_state.get_socket();
 
                        while !self.byte_buffer.is_empty() {
 
                            match socket.send(&self.byte_buffer) {
 
                                Ok(bytes_sent) => {
 
                                    self.byte_buffer.drain(..bytes_sent);
 
                                },
 
                                Err(err) => {
 
                                    if err.kind() == IoErrorKind::WouldBlock {
 
                                        return CompScheduling::Sleep; // wait until notified
 
                                    } else {
 
                                        todo!("handle socket.send error {:?}", err)
 
                                    }
 
                                }
 
                            }
 
                        }
 

	
 
                        // If here then we're done putting the data, we can
 
                        // finish the sync round
 
                        component::default_handle_sync_end(&mut self.exec_state, sched_ctx, comp_ctx, &mut self.consensus);
 
                        return CompScheduling::Requeue;
 
                    },
 
                    ClientSyncState::Getting => {
 
                        // We're going to try and receive a single message. If
 
                        // this causes us to end up blocking the component
 
                        // goes to sleep until it is polled.
 
                        const BUFFER_SIZE: usize = 1024; // TODO: Move to config
 

	
 
                        let socket = self.socket_state.get_socket();
 
                        self.byte_buffer.resize(BUFFER_SIZE, 0);
 
                        match socket.receive(&mut self.byte_buffer) {
 
                            Ok(num_received) => {
 
                                self.byte_buffer.resize(num_received, 0);
 
                                let message_content = self.bytes_to_data_message_content(&self.byte_buffer);
 
                                let send_result = component::default_send_data_message(
 
                                    &mut self.exec_state, self.pdl_output_port_id, PortInstruction::NoSource,
 
                                    message_content, sched_ctx, &mut self.consensus, &mut self.control, comp_ctx
 
                                );
 

	
 
                                if let Err(location_and_message) = send_result {
 
                                    component::default_handle_error_for_builtin(&mut self.exec_state, sched_ctx, location_and_message);
 
                                    return CompScheduling::Immediate;
 
                                } else {
 
                                    let scheduling = send_result.unwrap();
 
                                    self.sync_state = ClientSyncState::AwaitingCmd;
 
                                    return scheduling;
 
                                }
 
                            },
 
                            Err(err) => {
 
                                if err.kind() == IoErrorKind::WouldBlock {
 
                                    return CompScheduling::Sleep; // wait until polled
 
                                } else {
 
                                    todo!("handle socket.receive error {:?}", err)
 
                                }
 
                            }
 
                        }
 
                    },
 
                    ClientSyncState::FinishSync | ClientSyncState::FinishSyncThenQuit => {
 
                        component::default_handle_sync_end(&mut self.exec_state, sched_ctx, comp_ctx, &mut self.consensus);
 
                        return CompScheduling::Requeue;
 
                    },
 
                }
 
            },
 
            CompMode::BlockedGet => {
 
                // Entered when awaiting a new command
 
                debug_assert_eq!(self.sync_state, ClientSyncState::AwaitingCmd);
 
                return CompScheduling::Sleep;
 
            },
 
            CompMode::SyncEnd | CompMode::BlockedPut =>
 
                return CompScheduling::Sleep,
 
            CompMode::StartExit =>
 
                return component::default_handle_start_exit(&mut self.exec_state, &mut self.control, sched_ctx, comp_ctx, &mut self.consensus),
 
            CompMode::BusyExit =>
 
                return component::default_handle_busy_exit(&mut self.exec_state, &mut self.control, sched_ctx),
 
            CompMode::Exit =>
 
                return component::default_handle_exit(&self.exec_state),
 
        }
 
    }
 
}
 

	
 
impl ComponentTcpClient {
 
    pub(crate) fn new(arguments: ValueGroup) -> Self {
 
 
        // Two possible cases here: if the number of arguments is 3, then we
 
        // get: (socket_handle, input_port, output_port). If the number of
 
        // arguments is 4, then we get: (ip, port, input_port, output_port).
 
        assert!(arguments.values.len() == 3 || arguments.values.len() == 4);
 

	
 
        // Parsing arguments
 
        let (socket, input_port, output_port) = if arguments.values.len() == 3 {
 
            let socket_handle = arguments.values[0].as_sint32();
 
            let socket = SocketTcpClient::new_from_handle(socket_handle);
 

	
 
            let input_port = component::port_id_from_eval(arguments.values[1].as_input());
 
            let output_port = component::port_id_from_eval(arguments.values[2].as_output());
 

	
 
            (socket, input_port, output_port)
 
        } else {
 
            let (ip_address, port) = ip_addr_and_port_from_args(&arguments, 0, 1);
 
            let socket = SocketTcpClient::new(ip_address, port);
 

	
 
            let input_port = component::port_id_from_eval(arguments.values[2].as_input());
 
            let output_port = component::port_id_from_eval(arguments.values[3].as_output());
 

	
 
 
            (socket, input_port, output_port)
 
        };
 

	
 
        if let Err(socket) = socket {
 
            todo!("friendly error reporting: failed to open socket (reason: {:?})", socket);
 
        }
 

	
 
        return Self{
 
            socket_state: ClientSocketState::Connected(socket.unwrap()),
 
            sync_state: ClientSyncState::AwaitingCmd,
 
            poll_ticket: None,
 
            inbox_main: vec![None, None],
 
            inbox_backup: Vec::new(),
 
            input_union_send_tag_value: -1,
 
            input_union_receive_tag_value: -1,
 
            input_union_finish_tag_value: -1,
 
            input_union_shutdown_tag_value: -1,
 
            pdl_input_port_id: input_port,
 
            pdl_output_port_id: output_port,
 
            exec_state: CompExecState::new(),
 
            control: ControlLayer::default(),
 
            consensus: Consensus::new(),
 
            byte_buffer: Vec::new(),
 
        }
 
    }
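    // A minimal sketch (illustration only, the helper is hypothetical) of the
    // 3-value argument form accepted above; this is the shape that
    // ComponentTcpListener now builds when handing an accepted connection to a
    // new tcp_client component.
    #[allow(dead_code)]
    fn example_tcp_client_args(handle: i32, cmd_rx: PortId, data_tx: PortId) -> ValueGroup {
        ValueGroup::new_stack(vec![
            Value::SInt32(handle),                               // OS socket handle
            Value::Input(component::port_id_to_eval(cmd_rx)),    // command input port
            Value::Output(component::port_id_to_eval(data_tx)),  // data output port
        ])
    }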
 

	
 
    pub(crate) fn new_with_existing_connection(socket: SocketTcpClient, input_port: PortId, output_port: PortId) -> Self {
 
        return Self{
 
            socket_state: ClientSocketState::Connected(socket),
 
            sync_state: ClientSyncState::AwaitingCmd,
 
            poll_ticket: None,
 
            inbox_main: vec![None, None],
 
            inbox_backup: Vec::new(),
 
            input_union_send_tag_value: -1,
 
            input_union_receive_tag_value: -1,
 
            input_union_finish_tag_value: -1,
 
            input_union_shutdown_tag_value: -1,
 
            pdl_input_port_id: input_port,
 
            pdl_output_port_id: output_port,
 
            exec_state: CompExecState::new(),
 
            control: ControlLayer::default(),
 
            consensus: Consensus::new(),
 
            byte_buffer: Vec::new(),
 
        }
 
    }
 

	
 
    // Handles incoming data from the PDL side (hence, going into the socket)
 
    fn handle_incoming_data_message(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, message: DataMessage) {
 
        if self.exec_state.mode.is_in_sync_block() {
 
            self.consensus.handle_incoming_data_message(comp_ctx, &message);
 
        }
 

	
 
        match component::default_handle_incoming_data_message(
 
            &mut self.exec_state, &mut self.inbox_main, comp_ctx, message, sched_ctx, &mut self.control
 
        ) {
 
            IncomingData::PlacedInSlot => {},
 
            IncomingData::SlotFull(message) => {
 
                self.inbox_backup.push(message);
 
            }
 
        }
 
    }
 

	
 
    fn data_message_to_bytes(&self, message: DataMessage, bytes: &mut Vec<u8>) {
 
        debug_assert_eq!(message.data_header.target_port, self.pdl_input_port_id);
 
        debug_assert_eq!(message.content.values.len(), 1);
 

	
 
        if let Value::Array(array_pos) = message.content.values[0] {
 
            let region = &message.content.regions[array_pos as usize];
 
            bytes.reserve(region.len());
 
            for value in region {
 
                bytes.push(value.as_uint8());
 
            }
 
        } else {
 
            unreachable!();
 
        }
 
    }
 

	
 
    fn bytes_to_data_message_content(&self, buffer: &[u8]) -> ValueGroup {
 
        // Turn bytes into silly executor-style array
 
        let mut values = Vec::with_capacity(buffer.len());
 
        for byte in buffer.iter().copied() {
 
            values.push(Value::UInt8(byte));
 
        }
 

	
 
        // Put in a value group
 
        let mut value_group = ValueGroup::default();
 
        value_group.regions.push(values);
 
        value_group.values.push(Value::Array(0));
 

	
 
        return value_group;
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// ComponentTcpListener
 
// -----------------------------------------------------------------------------
 

	
 
enum ListenerSocketState {
 
    Connected(SocketTcpListener),
 
    Error,
 
}
 

	
 
impl ListenerSocketState {
 
    fn get_socket(&self) -> &SocketTcpListener {
 
        match self {
 
            ListenerSocketState::Connected(v) => return v,
 
            ListenerSocketState::Error => unreachable!(),
 
        }
 
    }
 
}
 

	
 
struct PendingComponent {
 
 
    client: i32, // OS socket handle
 
    cmd_rx: PortId,
 
    data_tx: PortId,
 
}
 

	
 
enum ListenerSyncState {
 
    AwaitingCmd,
 
    AcceptCommandReceived, // just received `Accept` command
 
    AcceptChannelGenerated, // created channel, waiting to end the sync round
 
    AcceptGenerateComponent, // sync ended, back in non-sync, now generate component
 
    FinishSyncThenQuit,
 
}
 

	
 
pub struct ComponentTcpListener {
 
    // Properties for the tcp socket
 
    socket_state: ListenerSocketState,
 
    sync_state: ListenerSyncState,
 
    pending_component: Option<PendingComponent>,
 
    poll_ticket: Option<PollTicket>,
 
    inbox_main: InboxMain,
 
    inbox_backup: InboxBackup,
 
    pdl_input_port_id: PortId, // input port, receives commands
 
    pdl_output_port_id: PortId, // output port, sends connections
 
    // Type information extracted from protocol
 
    tcp_client_definition: (ProcedureDefinitionId, TypeId),
 
    input_union_accept_tag: i64,
 
    input_union_shutdown_tag: i64,
 
    output_struct_rx_index: usize,
 
    output_struct_tx_index: usize,
 
    // Generic component state
 
    exec_state: CompExecState,
 
    control: ControlLayer,
 
    consensus: Consensus,
 
}
 

	
 
impl Component for ComponentTcpListener {
 
    fn on_creation(&mut self, id: CompId, sched_ctx: &SchedulerCtx) {
 
        // Retrieve type information for the message with ports we're going to send
 
        let pd = &sched_ctx.runtime.protocol;
 

	
 
        self.tcp_client_definition = sched_ctx.runtime.protocol.find_procedure(b"std.internet", b"tcp_client")
 
            .expect("'tcp_client' component in the 'std.internet' module");
 

	
 
        let cmd_type = pd.find_type(b"std.internet", b"ListenerCmd")
 
            .expect("'ListenerCmd' type in the 'std.internet' module");
 
        let cmd_type = cmd_type.as_union();
 

	
 
        self.input_union_accept_tag = cmd_type.get_variant_tag_value(b"Accept").unwrap();
 
        self.input_union_shutdown_tag = cmd_type.get_variant_tag_value(b"Shutdown").unwrap();
 

	
 
        let conn_type = pd.find_type(b"std.internet", b"TcpConnection")
 
            .expect("'TcpConnection' type in the 'std.internet' module");
 
        let conn_type = conn_type.as_struct();
 

	
 
        assert_eq!(conn_type.get_num_struct_fields(), 2);
 
        self.output_struct_rx_index = conn_type.get_struct_field_index(b"rx").unwrap();
 
        self.output_struct_tx_index = conn_type.get_struct_field_index(b"tx").unwrap();
 

	
 
        // Register socket for async events
 
        if let ListenerSocketState::Connected(socket) = &self.socket_state {
 
            let self_handle = sched_ctx.runtime.get_component_public(id);
 
            let poll_ticket = sched_ctx.polling.register(socket, self_handle, true, false)
 
                .expect("registering tcp listener");
 

	
 
            debug_assert!(self.poll_ticket.is_none());
 
            self.poll_ticket = Some(poll_ticket);
 
        }
 
    }
 

	
 
    fn on_shutdown(&mut self, sched_ctx: &SchedulerCtx) {
 
        if let Some(poll_ticket) = self.poll_ticket.take() {
 
            sched_ctx.polling.unregister(poll_ticket);
 
        }
 
    }
 

	
 
    fn adopt_message(&mut self, _comp_ctx: &mut CompCtx, _message: DataMessage) {
 
        unreachable!();
 
    }
 

	
 
    fn handle_message(&mut self, sched_ctx: &mut SchedulerCtx, comp_ctx: &mut CompCtx, message: Message) {
 
        match message {
 
            Message::Data(message) => {
 
                self.handle_incoming_data_message(sched_ctx, comp_ctx, message);
 
            },
 
            Message::Sync(message) => {
 
                let decision = self.consensus.receive_sync_message(sched_ctx, comp_ctx, message);
 
                component::default_handle_sync_decision(sched_ctx, &mut self.exec_state, comp_ctx, decision, &mut self.consensus);
 
            },
 
            Message::Control(message) => {
 
                if let Err(location_and_message) = component::default_handle_control_message(
 
                    &mut self.exec_state, &mut self.control, &mut self.consensus,
 
                    message, sched_ctx, comp_ctx, &mut self.inbox_main, &mut self.inbox_backup
 
                ) {
 
                    component::default_handle_error_for_builtin(&mut self.exec_state, sched_ctx, location_and_message);
 
                }
 
            },
 
            Message::Poll => {
 
                sched_ctx.info("Received polling event");
 
            },
 
        }
 
    }
 

	
 
    fn run(&mut self, sched_ctx: &mut SchedulerCtx, comp_ctx: &mut CompCtx) -> CompScheduling {
 
        sched_ctx.info(&format!("Running component ComponentTcpListener (mode: {:?})", self.exec_state.mode));
 

	
 
        match self.exec_state.mode {
 
            CompMode::BlockedSelect
 
                => unreachable!(),
 
            CompMode::PutPortsBlockedTransferredPorts |
 
            CompMode::PutPortsBlockedAwaitingAcks |
 
            CompMode::PutPortsBlockedSendingPort |
 
            CompMode::NewComponentBlocked
 
                => return CompScheduling::Sleep,
 
            CompMode::NonSync => {
 
                match &self.socket_state {
 
                    ListenerSocketState::Connected(_socket) => {
 
                        match self.sync_state {
 
                            ListenerSyncState::AwaitingCmd => {
 
                                component::default_handle_sync_start(
 
                                    &mut self.exec_state, &mut self.inbox_main, sched_ctx, comp_ctx, &mut self.consensus
 
                                );
 
                            },
 
                            ListenerSyncState::AcceptCommandReceived |
 
                            ListenerSyncState::AcceptChannelGenerated => unreachable!(),
 
                            ListenerSyncState::AcceptGenerateComponent => {
 
                                // Now that we're outside the sync round, create the tcp client
 
                                // component
 
                                let pending = self.pending_component.take().unwrap();
 
                                let arguments = ValueGroup::new_stack(vec![
 
                                    Value::SInt32(pending.client),
 
                                    Value::Input(port_id_to_eval(pending.cmd_rx)),
 
                                    Value::Output(port_id_to_eval(pending.data_tx)),
 
                                ]);
 
                                component::default_start_create_component(
 
                                    &mut self.exec_state, sched_ctx, comp_ctx, &mut self.control,
 
                                    &mut self.inbox_main, &mut self.inbox_backup,
 
                                    self.tcp_client_definition.0, self.tcp_client_definition.1,
 
                                    arguments
 
                                );
 
                                self.sync_state = ListenerSyncState::AwaitingCmd;
 
                            },
 
                            ListenerSyncState::FinishSyncThenQuit => {
 
                                self.exec_state.set_as_start_exit(ExitReason::Termination);
 
                            },
 
                        }
 

	
 
                        return CompScheduling::Immediate;
 
                    },
 
                    ListenerSocketState::Error => {
 
                        self.exec_state.set_as_start_exit(ExitReason::ErrorNonSync);
 
                        return CompScheduling::Immediate;
 
                    }
 
                }
 
            },
 
            CompMode::Sync => {
 
                match self.sync_state {
 
                    ListenerSyncState::AwaitingCmd => {
 
                        match component::default_attempt_get(
 
                            &mut self.exec_state, self.pdl_input_port_id, PortInstruction::NoSource,
 
                            &mut self.inbox_main, &mut self.inbox_backup, sched_ctx, comp_ctx,
 
                            &mut self.control, &mut self.consensus
 
                        ) {
 
                            GetResult::Received(message) => {
 
                                let (tag_value, _) = message.content.values[0].as_union();
 
                                if tag_value == self.input_union_accept_tag {
 
                                    self.sync_state = ListenerSyncState::AcceptCommandReceived;
 
                                } else if tag_value == self.input_union_shutdown_tag {
 
                                    self.sync_state = ListenerSyncState::FinishSyncThenQuit;
 
                                } else {
 
                                    unreachable!("got tag_value {}", tag_value);
 
                                }
 

	
 
                                return CompScheduling::Immediate;
 
                            },
 
                            GetResult::NoMessage => {
 
                                return CompScheduling::Sleep;
 
                            },
 
                            GetResult::Error(location_and_message) => {
 
                                component::default_handle_error_for_builtin(&mut self.exec_state, sched_ctx, location_and_message);
 
                                return CompScheduling::Immediate;
 
                            }
 
                        }
 
                    },
 
                    ListenerSyncState::AcceptCommandReceived => {
 
                        let socket = self.socket_state.get_socket();
 
                        match socket.accept() {
 
 
                            Ok(client_handle) => {
 
                                // Create the channels (and the inbox entries, to stay consistent
 
                                // with the expectations from the `component` module's functions)
 
 
                                let cmd_channel = comp_ctx.create_channel();
 
                                let data_channel = comp_ctx.create_channel();
 

	
 
                                let port_ids = [
 
                                    cmd_channel.putter_id, cmd_channel.getter_id,
 
                                    data_channel.putter_id, data_channel.getter_id,
 
                                ];
 
                                for port_id in port_ids {
 
                                    let expected_port_index = self.inbox_main.len();
 
                                    let port_handle = comp_ctx.get_port_handle(port_id);
 
                                    self.inbox_main.push(None);
 
                                    self.consensus.notify_of_new_port(expected_port_index, port_handle, comp_ctx);
 
                                }
 

	
 
                                // Construct the message containing the appropriate ports that will
 
                                // be sent to the component commanding this listener.
 
                                let mut values = ValueGroup::new_stack(Vec::with_capacity(1));
 
                                values.values.push(Value::Struct(0));
 
                                values.regions.push(vec![Value::Unassigned, Value::Unassigned]);
 
                                values.regions[0][self.output_struct_tx_index] = Value::Output(port_id_to_eval(cmd_channel.putter_id));
 
                                values.regions[0][self.output_struct_rx_index] = Value::Input(port_id_to_eval(data_channel.getter_id));
 
                                if let Err(location_and_message) = component::default_send_data_message(
 
                                    &mut self.exec_state, self.pdl_output_port_id, PortInstruction::NoSource, values,
 
                                    sched_ctx, &mut self.consensus, &mut self.control, comp_ctx
 
                                ) {
 
                                    component::default_handle_error_for_builtin(
 
                                        &mut self.exec_state, sched_ctx, location_and_message
 
                                    );
 
                                }
 

	
 
                                // Prepare for finishing the consensus round, and once finished,
 
                                // create the tcp client component
 
                                self.sync_state = ListenerSyncState::AcceptChannelGenerated;
 
                                debug_assert!(self.pending_component.is_none());
 
                                self.pending_component = Some(PendingComponent{
 
 
                                    client: client_handle,
 
                                    cmd_rx: cmd_channel.getter_id,
 
                                    data_tx: data_channel.putter_id
 
                                });
 

	
 
                                return CompScheduling::Requeue;
 
                            },
 
                            Err(err) => {
 
                                if err.kind() == IoErrorKind::WouldBlock {
 
                                    return CompScheduling::Sleep;
 
                                } else {
 
                                    todo!("handle listener.accept error {:?}", err)
 
                                }
 
                            }
 
                        }
 
                    },
 
                    ListenerSyncState::AcceptChannelGenerated => {
 
                        component::default_handle_sync_end(&mut self.exec_state, sched_ctx, comp_ctx, &mut self.consensus);
 
                        self.sync_state = ListenerSyncState::AcceptGenerateComponent;
 
                        return CompScheduling::Requeue;
 
                    }
 
                    ListenerSyncState::FinishSyncThenQuit => {
 
                        component::default_handle_sync_end(&mut self.exec_state, sched_ctx, comp_ctx, &mut self.consensus);
 
                        return CompScheduling::Requeue;
 
                    },
 
                    ListenerSyncState::AcceptGenerateComponent => unreachable!(),
 
                }
 
            },
 
            CompMode::BlockedGet => {
 
                return CompScheduling::Sleep;
 
            },
 
            CompMode::SyncEnd | CompMode::BlockedPut
 
                => return CompScheduling::Sleep,
 
            CompMode::StartExit =>
 
                return component::default_handle_start_exit(&mut self.exec_state, &mut self.control, sched_ctx, comp_ctx, &mut self.consensus),
 
            CompMode::BusyExit =>
 
                return component::default_handle_busy_exit(&mut self.exec_state, &mut self.control, sched_ctx),
 
            CompMode::Exit =>
 
                return component::default_handle_exit(&self.exec_state),
 
        }
 
    }
 
}
 

	
 
impl ComponentTcpListener {
 
    pub(crate) fn new(arguments: ValueGroup) -> Self {
 
        debug_assert_eq!(arguments.values.len(), 4);
 

	
 
        // Parsing arguments
 
        let (ip_address, port) = ip_addr_and_port_from_args(&arguments, 0, 1);
 
        let input_port = component::port_id_from_eval(arguments.values[2].as_input());
 
        let output_port = component::port_id_from_eval(arguments.values[3].as_output());
 

	
 
        let socket = SocketTcpListener::new(ip_address, port);
 
        if let Err(socket) = socket {
 
            todo!("friendly error reporting: failed to open socket (reason: {:?})", socket);
 
        }
 

	
 
        return Self {
 
            socket_state: ListenerSocketState::Connected(socket.unwrap()),
 
            sync_state: ListenerSyncState::AwaitingCmd,
 
            pending_component: None,
 
            poll_ticket: None,
 
            inbox_main: vec![None, None],
 
            inbox_backup: InboxBackup::new(),
 
            pdl_input_port_id: input_port,
 
            pdl_output_port_id: output_port,
 
            tcp_client_definition: (ProcedureDefinitionId::new_invalid(), TypeId::new_invalid()),
 
            input_union_accept_tag: -1,
 
            input_union_shutdown_tag: -1,
 
            output_struct_tx_index: 0,
 
            output_struct_rx_index: 0,
 
            exec_state: CompExecState::new(),
 
            control: ControlLayer::default(),
 
            consensus: Consensus::new(),
 
        }
 
    }
 

	
 
    fn handle_incoming_data_message(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, message: DataMessage) {
 
        if self.exec_state.mode.is_in_sync_block() {
 
            self.consensus.handle_incoming_data_message(comp_ctx, &message);
 
        }
 

	
 
        match component::default_handle_incoming_data_message(
 
            &mut self.exec_state, &mut self.inbox_main, comp_ctx, message, sched_ctx, &mut self.control
 
        ) {
 
            IncomingData::PlacedInSlot => {},
 
            IncomingData::SlotFull(message) => {
 
                self.inbox_backup.push(message);
 
            }
 
        }
 
    }
 
}
 

	
 
fn ip_addr_and_port_from_args(
 
    arguments: &ValueGroup, ip_index: usize, port_index: usize
 
) -> (IpAddr, u16) {
 
    debug_assert!(ip_index < arguments.values.len());
 
    debug_assert!(port_index < arguments.values.len());
 

	
 
    // Parsing IP address
 
    let ip_heap_pos = arguments.values[ip_index].as_array();
 
    let ip_elements = &arguments.regions[ip_heap_pos as usize];
 

	
 
    let ip_address = match ip_elements.len() {
 
        0 => IpAddr::V4(Ipv4Addr::UNSPECIFIED),
 
        4 => IpAddr::V4(Ipv4Addr::new(
 
            ip_elements[0].as_uint8(), ip_elements[1].as_uint8(),
 
            ip_elements[2].as_uint8(), ip_elements[3].as_uint8()
 
        )),
 
        _ => todo!("friendly error reporting: ip should contain 4 octets (or 0 for unspecified)")
 
    };
 

	
 
    let port = arguments.values[port_index].as_uint16();
 

	
 
    return (ip_address, port);
 
}
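
The octet-list convention parsed above (an empty list means "unspecified", exactly four elements give an IPv4 address, anything else is an error) can be expressed with slice patterns; a standalone sketch that is independent of `ValueGroup`:

use std::net::{IpAddr, Ipv4Addr};

// Mirrors the argument convention of ip_addr_and_port_from_args: {} maps to
// 0.0.0.0, a four-element list is used verbatim, everything else is rejected.
fn ip_from_octets(octets: &[u8]) -> Option<IpAddr> {
    match octets {
        [] => Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED)),
        [a, b, c, d] => Some(IpAddr::V4(Ipv4Addr::new(*a, *b, *c, *d))),
        _ => None,
    }
}

fn main() {
    assert_eq!(ip_from_octets(&[]), Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED)));
    assert_eq!(ip_from_octets(&[127, 0, 0, 1]), Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))));
    assert_eq!(ip_from_octets(&[1, 2, 3]), None);
}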
 

	
src/runtime2/poll/mod.rs
Show inline comments
 
@@ -71,275 +71,275 @@ impl Poller {
 
        const MAX_TIMEOUT: u128 = c_int::MAX as u128;
 

	
 
        let timeout_millis = timeout.as_millis();
 
        let timeout_millis = if timeout_millis > MAX_TIMEOUT {
 
            -1 // effectively infinite
 
        } else {
 
            timeout_millis as c_int
 
        };
 

	
 
        debug_assert!(events.is_empty());
 
        debug_assert!(events.capacity() > 0 && events.capacity() < i32::MAX as usize);
 
        let num_events = syscall_result(unsafe{
 
            libc::epoll_wait(self.handle, events.as_mut_ptr(), events.capacity() as i32, timeout_millis)
 
        })?;
 

	
 
        unsafe{
 
            debug_assert!(num_events >= 0);
 
            events.set_len(num_events as usize);
 
        }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn events_from_rw_flags(read: bool, write: bool) -> u32 {
 
        let mut events = libc::EPOLLET;
 
        if read {
 
            events |= libc::EPOLLIN | libc::EPOLLRDHUP;
 
        }
 
        if write {
 
            events |= libc::EPOLLOUT;
 
        }
 

	
 
        return events as u32;
 
    }
 
}
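
The timeout handling in `wait` clamps the requested duration to what `epoll_wait` can express: anything above `c_int::MAX` milliseconds becomes `-1`, which epoll interprets as "block indefinitely". A small standalone check of that conversion (a sketch, not the runtime's code):

use std::os::raw::c_int;
use std::time::Duration;

fn epoll_timeout_millis(timeout: Duration) -> c_int {
    const MAX_TIMEOUT: u128 = c_int::MAX as u128;
    let millis = timeout.as_millis();
    if millis > MAX_TIMEOUT {
        return -1; // effectively infinite
    } else {
        return millis as c_int;
    }
}

fn main() {
    assert_eq!(epoll_timeout_millis(Duration::from_millis(250)), 250);
    assert_eq!(epoll_timeout_millis(Duration::from_secs(u64::MAX)), -1);
}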
 

	
 
#[cfg(unix)]
 
impl Drop for Poller {
 
    fn drop(&mut self) {
 
        unsafe{ libc::close(self.handle); }
 
    }
 
}
 

	
 
#[inline]
 
fn syscall_result(result: c_int) -> io::Result<c_int> {
 
    if result < 0 {
 
        return Err(io::Error::last_os_error());
 
    } else {
 
        return Ok(result);
 
    }
 
}
 

	
 
#[cfg(not(unix))]
 
struct Poller {
 
    // Not implemented for OSes other than Unix
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Polling Thread
 
// -----------------------------------------------------------------------------
 

	
 
enum PollCmd {
 
    Register(CompHandle, UserData),
 
    Unregister(FileDescriptor, UserData),
 
    Shutdown,
 
}
 

	
 
pub struct PollingThread {
 
    poller: Arc<Poller>,
 
    runtime: Arc<RuntimeInner>,
 
    queue: QueueDynMpsc<PollCmd>,
 
    log_level: LogLevel,
 
}
 

	
 
impl PollingThread {
 
    pub(crate) fn new(runtime: Arc<RuntimeInner>, log_level: LogLevel) -> Result<(PollingThreadHandle, PollingClientFactory), RtError> {
 
        let poller = Poller::new()
 
            .map_err(|e| rt_error!("failed to create poller, because: {}", e))?;
 
        let poller = Arc::new(poller);
 
        let queue = QueueDynMpsc::new(64);
 
        let queue_producers = queue.producer_factory();
 

	
 
        let mut thread_data = PollingThread{
 
            poller: poller.clone(),
 
            runtime: runtime.clone(),
 
            queue,
 
            log_level,
 
        };
 
        let thread_handle = thread::Builder::new()
 
            .name(String::from("poller"))
 
            .spawn(move || { thread_data.run() })
 
            .map_err(|reason|
 
                rt_error!("failed to start polling thread, because: {}", reason)
 
            )?;
 

	
 
        let thread_handle = PollingThreadHandle{
 
            queue: Some(queue_producers.producer()),
 
            handle: Some(thread_handle),
 
        };
 
        let client_factory = PollingClientFactory{
 
            poller,
 
            generation_counter: Arc::new(AtomicU32::new(0)),
 
            queue_factory: queue_producers,
 
        };
 

	
 
        return Ok((thread_handle, client_factory));
 
    }
 

	
 
    pub(crate) fn run(&mut self) {
 
        use std::io::ErrorKind;
 
        use crate::runtime2::communication::Message;
 

	
 
        const NUM_EVENTS: usize = 256;
 
        const EPOLL_DURATION: time::Duration = time::Duration::from_millis(250);
 

	
 
        // @performance: Lots of improvements are possible here; a HashMap is
 
        // likely a horrible way to do this.
 
        let mut events = Vec::with_capacity(NUM_EVENTS);
 
        let mut lookup = HashMap::with_capacity(64);
 
        self.log("Starting polling thread");
 

	
 
        loop {
 
            // Retrieve events first (because the PollingClient will first
 
            // register at epoll, and then push a command into the queue).
 
            loop {
 
                let wait_result = self.poller.wait(&mut events, EPOLL_DURATION);
 
                match wait_result {
 
                    Ok(()) => break,
 
                    Err(reason) => {
 
                        match reason.kind() {
 
                            ErrorKind::Interrupted => {
 
                                // Happens when we're debugging and set a break-
 
                                // point; we want to continue waiting
 
                            },
 
                            _ => {
 
                                panic!("failed to poll: {}", reason);
 
                            }
 
                        }
 
                    }
 
                }
 
            }
 

	
 
            // Then handle everything in the command queue.
 
            while let Some(command) = self.queue.pop() {
 
                match command {
 
                    PollCmd::Register(handle, user_data) => {
 
                        self.log(&format!("Registering component {:?} as {}", handle.id(), user_data.0));
 
                        let key = Self::user_data_as_key(user_data);
 
                        debug_assert!(!lookup.contains_key(&key));
 
                        lookup.insert(key, handle);
 
                    },
 
                    PollCmd::Unregister(_file_descriptor, user_data) => {
 
                        let key = Self::user_data_as_key(user_data);
 
                        debug_assert!(lookup.contains_key(&key));
 
                        let mut handle = lookup.remove(&key).unwrap();
 
                        self.log(&format!("Unregistering component {:?} as {}", handle.id(), user_data.0));
 
                        if let Some(key) = handle.decrement_users() {
 
                            self.runtime.destroy_component(key);
 
                        }
 
                    },
 
                    PollCmd::Shutdown => {
 
                        // The contract is that all scheduler threads shut down
 
                        // before the polling thread. This happens when all
 
                        // components are removed.
 
                        self.log("Received shutdown signal");
 
                        debug_assert!(lookup.is_empty());
 
                        return;
 
                    }
 
                }
 
            }
 

	
 
            // Now process all of the events. Because we might have had a
 
            // `Register` command followed by an `Unregister` command (e.g. a
 
            // component has died), we might get events that are not associated
 
            // with an entry in the lookup.
 
            for event in events.drain(..) {
 
                let key = event.u64;
 
                if let Some(handle) = lookup.get(&key) {
 
                    let events = event.events;
 
                    self.log(&format!("Sending poll to {:?} (event: {:x})", handle.id(), events));
 
                    handle.send_message(&self.runtime, Message::Poll, true);
 
                }
 
            }
 
        }
 
    }
 

	
 
    #[inline]
 
    fn user_data_as_key(data: UserData) -> u64 {
 
        return data.0;
 
    }
 

	
 
    fn log(&self, message: &str) {
 
        if self.log_level >= LogLevel::Info {
 
        if LogLevel::Info >= self.log_level {
 
            println!("[polling] {}", message);
 
        }
 
    }
 
}
 

	
 
// bit convoluted, but it works
 
pub(crate) struct PollingThreadHandle {
 
    // requires Option, because:
 
    queue: Option<QueueDynProducer<PollCmd>>, // destructor needs to be called
 
    handle: Option<thread::JoinHandle<()>>, // we need to call `join`
 
}
 

	
 
impl PollingThreadHandle {
 
    pub(crate) fn shutdown(&mut self) -> thread::Result<()> {
 
        debug_assert!(self.handle.is_some(), "polling thread already destroyed");
 
        self.queue.take().unwrap().push(PollCmd::Shutdown);
 
        return self.handle.take().unwrap().join();
 
    }
 
}
 

	
 
impl Drop for PollingThreadHandle {
 
    fn drop(&mut self) {
 
        debug_assert!(self.queue.is_none() && self.handle.is_none());
 
    }
 
}
 

	
 
// oh my god, now I'm writing factory objects. I'm not feeling too well
 
pub(crate) struct PollingClientFactory {
 
    poller: Arc<Poller>,
 
    generation_counter: Arc<AtomicU32>,
 
    queue_factory: QueueDynProducerFactory<PollCmd>,
 
}
 

	
 
impl PollingClientFactory {
 
    pub(crate) fn client(&self) -> PollingClient {
 
        return PollingClient{
 
            poller: self.poller.clone(),
 
            generation_counter: self.generation_counter.clone(),
 
            queue: self.queue_factory.producer(),
 
        };
 
    }
 
}
 

	
 
pub(crate) struct PollTicket(FileDescriptor, u64);
 

	
 
/// A structure that allows the owner to register components at the polling
 
/// thread. Because of assumptions in the communication queue, all of these
 
/// clients should be dropped before stopping the polling thread.
 
pub(crate) struct PollingClient {
 
    poller: Arc<Poller>,
 
    generation_counter: Arc<AtomicU32>,
 
    queue: QueueDynProducer<PollCmd>,
 
}
 

	
 
impl PollingClient {
 
    pub(crate) fn register<F: AsFileDescriptor>(&self, entity: &F, handle: CompHandle, read: bool, write: bool) -> Result<PollTicket, RtError> {
 
        let generation = self.generation_counter.fetch_add(1, Ordering::Relaxed);
 
        let user_data = user_data_for_component(handle.id().0, generation);
 
        self.queue.push(PollCmd::Register(handle, user_data));
 

	
 
        let file_descriptor = entity.as_file_descriptor();
 
        self.poller.register(file_descriptor, user_data, read, write)
 
            .map_err(|e| rt_error!("failed to register for polling, because: {}", e))?;
 

	
 
        return Ok(PollTicket(file_descriptor, user_data.0));
 
    }
 

	
 
    pub(crate) fn unregister(&self, ticket: PollTicket) -> Result<(), RtError> {
 
        let file_descriptor = ticket.0;
 
        let user_data = UserData(ticket.1);
 
        self.queue.push(PollCmd::Unregister(file_descriptor, user_data));
 
        self.poller.unregister(file_descriptor)
 
            .map_err(|e| rt_error!("failed to unregister polling, because: {}", e))?;
 

	
 
        return Ok(());
 
    }
 
}
 

	
 
#[inline]
 
fn user_data_for_component(component_id: u32, generation: u32) -> UserData {
 
    return UserData((generation as u64) << 32 | (component_id as u64));
 
}
 
\ No newline at end of file
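
The `user_data_for_component` helper above also produces the key used in the polling thread's lookup map: the generation counter occupies the upper 32 bits and the component id the lower 32 bits, so a recycled component id paired with a fresh generation still yields a distinct key. A standalone sketch of the round-trip (the `unpack` helper is hypothetical; the runtime never needs it):

fn pack(component_id: u32, generation: u32) -> u64 {
    return (generation as u64) << 32 | (component_id as u64);
}

fn unpack(key: u64) -> (u32, u32) {
    return ((key & 0xFFFF_FFFF) as u32, (key >> 32) as u32);
}

fn main() {
    let key = pack(7, 3);
    assert_eq!(key, 0x0000_0003_0000_0007);
    assert_eq!(unpack(key), (7, 3));
}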
src/runtime2/stdlib/internet.rs
Show inline comments
 
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 
use std::mem::size_of;
 
use std::io::Error as IoError;
 

	
 
use libc::{
 
    c_int,
 
    sockaddr_in, sockaddr_in6, in_addr, in6_addr,
 
    socket, bind, listen, accept, connect, close,
 
};
 

	
 
use crate::runtime2::poll::{AsFileDescriptor, FileDescriptor};
 

	
 
#[derive(Debug)]
 
pub enum SocketError {
 
    Opening,
 
    Modifying,
 
    Binding,
 
    Listening,
 
    Connecting,
 
    Accepted,
 
    Accepting,
 
}
 

	
 
enum SocketState {
 
    Opened,
 
    Listening,
 
}
 

	
 
const SOCKET_BLOCKING: bool = false;
 

	
 
/// TCP (client) connection
 
pub struct SocketTcpClient {
 
    socket_handle: libc::c_int,
 
    is_blocking: bool,
 
}
 

	
 
impl SocketTcpClient {
 
    pub fn new(ip: IpAddr, port: u16) -> Result<Self, SocketError> {
 

	
 
        let socket_handle = create_and_connect_socket(
 
            libc::SOCK_STREAM, libc::IPPROTO_TCP, ip, port
 
        )?;
 
        if !set_socket_blocking(socket_handle, SOCKET_BLOCKING) {
 
            unsafe{ libc::close(socket_handle); }
 
            return Err(SocketError::Modifying);
 
        }
 

	
 
        println!(" CREATE  [{:04}] client", socket_handle);
 
        return Ok(SocketTcpClient{
 
            socket_handle,
 
            is_blocking: SOCKET_BLOCKING,
 
        })
 
    }
 

	
 
    fn new_from_handle(socket_handle: libc::c_int) -> Result<Self, SocketError> {
 
    pub(crate) fn new_from_handle(socket_handle: libc::c_int) -> Result<Self, SocketError> {
 
        if !set_socket_blocking(socket_handle, SOCKET_BLOCKING) {
 
            unsafe{ libc::close(socket_handle); }
 
            return Err(SocketError::Modifying);
 
        }
 

	
 
        return Ok(SocketTcpClient{
 
            socket_handle,
 
            is_blocking: SOCKET_BLOCKING,
 
        })
 
    }
 

	
 
    pub fn send(&self, message: &[u8]) -> Result<usize, IoError> {
 
        let result = unsafe{
 
            let message_pointer = message.as_ptr().cast();
 
            libc::send(self.socket_handle, message_pointer, message.len() as libc::size_t, 0)
 
        };
 
        if result < 0 {
 
            return Err(IoError::last_os_error());
 
        }
 

	
 
        return Ok(result as usize);
 
    }
 

	
 
    /// Receives data from the TCP socket. Returns the number of bytes received.
 
    /// More bytes may be present even though `used < buffer.len()`.
 
    pub fn receive(&self, buffer: &mut [u8]) -> Result<usize, IoError> {
 
        let result = unsafe {
 
            let message_pointer = buffer.as_mut_ptr().cast();
 
            libc::recv(self.socket_handle, message_pointer, buffer.len(), 0)
 
        };
 
        if result < 0 {
 
            return Err(IoError::last_os_error());
 
        }
 

	
 
        return Ok(result as usize);
 
    }
 
}
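
The doc comment on `receive` above states the important contract: a short read does not mean the peer is done, so a caller keeps reading (or keeps issuing `Receive` commands) until it has what it needs. A standalone `std::net` sketch of that pattern (hypothetical; it does not use the runtime's socket types):

use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::thread;

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let address = listener.local_addr()?;

    // Peer that writes a five-byte message and hangs up.
    let server = thread::spawn(move || -> std::io::Result<()> {
        let (mut stream, _) = listener.accept()?;
        stream.write_all(b"hello")?;
        return Ok(());
    });

    let mut stream = TcpStream::connect(address)?;
    let mut received = Vec::new();
    let mut buffer = [0u8; 2]; // deliberately smaller than the message
    while received.len() < 5 {
        let used = stream.read(&mut buffer)?;
        if used == 0 { break; } // peer closed the connection
        received.extend_from_slice(&buffer[..used]);
    }

    assert_eq!(received, b"hello");
    server.join().unwrap()?;
    return Ok(());
}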
 

	
 
impl Drop for SocketTcpClient {
 
    fn drop(&mut self) {
 
        println!("DESTRUCT [{:04}] client", self.socket_handle);
 
        debug_assert!(self.socket_handle >= 0);
 
        unsafe{ close(self.socket_handle) };
 
    }
 
}
 

	
 
impl AsFileDescriptor for SocketTcpClient {
 
    fn as_file_descriptor(&self) -> FileDescriptor {
 
        return self.socket_handle;
 
    }
 
}
 

	
 
/// TCP listener, yielding new connections
 
pub struct SocketTcpListener {
 
    socket_handle: libc::c_int,
 
    is_blocking: bool,
 
}
 

	
 
impl SocketTcpListener {
 
    pub fn new(ip: IpAddr, port: u16) -> Result<Self, SocketError> {
 
        // Create and bind
 
        let socket_handle = create_and_bind_socket(
 
            libc::SOCK_STREAM, libc::IPPROTO_TCP, ip, port
 
        )?;
 
        if !set_socket_blocking(socket_handle, SOCKET_BLOCKING) {
 
            unsafe{ libc::close(socket_handle); }
 
            return Err(SocketError::Modifying);
 
        }
 

	
 
        // Listen
 
        unsafe {
 
            let result = listen(socket_handle, libc::SOMAXCONN);
 
            if result < 0 {
 
                unsafe{ libc::close(socket_handle); }
 
                return Err(SocketError::Listening);
 
            }
 
        }
 

	
 

	
 
        println!(" CREATE  [{:04}] listener", socket_handle);
 
        return Ok(SocketTcpListener{
 
            socket_handle,
 
            is_blocking: SOCKET_BLOCKING,
 
        });
 
    }
 

	
 
    pub fn accept(&self) -> Result<Result<SocketTcpClient, SocketError>, IoError> {
 
    pub fn accept(&self) -> Result<libc::c_int, IoError> {
 
        let (mut address, mut address_size) = create_sockaddr_in_empty();
 
        let address_pointer = &mut address as *mut sockaddr_in;
 
        let socket_handle = unsafe { accept(self.socket_handle, address_pointer.cast(), &mut address_size) };
 
        if socket_handle < 0 {
 
            return Err(IoError::last_os_error());
 
        }
 

	
 
        return Ok(SocketTcpClient::new_from_handle(socket_handle));
 
        println!(" CREATE  [{:04}] client (from listener)", socket_handle);
 
        return Ok(socket_handle);
 
    }
 
}
 

	
 
impl Drop for SocketTcpListener {
 
    fn drop(&mut self) {
 
        println!("DESTRUCT [{:04}] listener", self.socket_handle);
 
        debug_assert!(self.socket_handle >= 0);
 
        unsafe{ close(self.socket_handle) };
 
    }
 
}
 

	
 
impl AsFileDescriptor for SocketTcpListener {
 
    fn as_file_descriptor(&self) -> FileDescriptor {
 
        return self.socket_handle;
 
    }
 
}
 

	
 
/// Raw socket receiver. Essentially a listener that accepts a single connection
 
struct SocketRawRx {
 
    listen_handle: c_int,
 
    accepted_handle: c_int,
 
}
 

	
 
impl SocketRawRx {
 
    pub fn new(ip: Option<Ipv4Addr>, port: u16) -> Result<Self, SocketError> {
 
        let ip = ip.unwrap_or(Ipv4Addr::UNSPECIFIED); // unspecified is the same as INADDR_ANY
 
        let address = unsafe{ in_addr{
 
            s_addr: std::mem::transmute(ip.octets()),
 
        }};
 
        let socket_address = sockaddr_in{
 
            sin_family: libc::AF_INET as libc::sa_family_t,
 
            sin_port: htons(port),
 
            sin_addr: address,
 
            sin_zero: [0; 8],
 
        };
 

	
 
        unsafe {
 
            let socket_handle = create_and_bind_socket(libc::SOCK_RAW, 0, IpAddr::V4(ip), port)?;
 

	
 
            let result = listen(socket_handle, 3);
 
            if result < 0 { return Err(SocketError::Listening); }
 

	
 
            return Ok(SocketRawRx{
 
                listen_handle: socket_handle,
 
                accepted_handle: -1,
 
            });
 
        }
 
    }
 

	
 
    // pub fn try_accept(&mut self, timeout_ms: u32) -> Result<(), SocketError> {
 
    //     if self.accepted_handle >= 0 {
 
    //         // Already accepted a connection
 
    //         return Err(SocketError::Accepted);
 
    //     }
 
    //
 
    //     let mut socket_address = sockaddr_in{
 
    //         sin_family: 0,
 
    //         sin_port: 0,
 
    //         sin_addr: in_addr{ s_addr: 0 },
 
    //         sin_zero: [0; 8]
 
    //     };
 
    //     let mut size = size_of::<sockaddr_in>() as u32;
 
    //     unsafe {
 
    //         let result = accept(self.listen_handle, &mut socket_address as *mut _, &mut size as *mut _);
 
    //         if result < 0 {
 
    //             return Err(SocketError::Accepting);
 
    //         }
 
    //     }
 
    //
 
    //     return Ok(());
 
    // }
 
}
 

	
 
impl Drop for SocketRawRx {
 
    fn drop(&mut self) {
 
        if self.accepted_handle >= 0 {
 
            unsafe {
 
                close(self.accepted_handle);
 
            }
 
        }
 

	
 
        if self.listen_handle >= 0 {
 
            unsafe {
 
                close(self.listen_handle);
 
            }
 
        }
 
    }
 
}
 

	
 
// The following is essentially stolen from `mio`'s io_source.rs file.
 
#[cfg(unix)]
 
trait AsRawFileDescriptor {
 
    fn as_raw_file_descriptor(&self) -> c_int;
 
}
 

	
 
impl AsRawFileDescriptor for SocketTcpClient {
 
    fn as_raw_file_descriptor(&self) -> c_int {
 
        return self.socket_handle;
 
    }
 
}
 

	
 
/// Performs the `socket` and `bind` calls.
 
fn create_and_bind_socket(socket_type: libc::c_int, protocol: libc::c_int, ip: IpAddr, port: u16) -> Result<libc::c_int, SocketError> {
 
    let family = socket_family_from_ip(ip);
 

	
 
    unsafe {
 
        let socket_handle = socket(family, socket_type, protocol);
 
        if socket_handle < 0 {
 
            return Err(SocketError::Opening);
 
        }
 

	
 
        let result = match ip {
 
            IpAddr::V4(ip) => {
 
                let (socket_address, address_size) = create_sockaddr_in_v4(ip, port);
 
                let socket_pointer = &socket_address as *const sockaddr_in;
 
                bind(socket_handle, socket_pointer.cast(), address_size)
 
            },
 
            IpAddr::V6(ip) => {
 
                let (socket_address, address_size) = create_sockaddr_in_v6(ip, port);
 
                let socket_pointer= &socket_address as *const sockaddr_in6;
 
                bind(socket_handle, socket_pointer.cast(), address_size)
 
            }
 
        };
 
        if result < 0 {
 
            close(socket_handle);
 
            return Err(SocketError::Binding);
 
        }
 

	
 
        return Ok(socket_handle);
 
    }
 
}
 

	
 
/// Performs the `socket` and `connect` calls
 
fn create_and_connect_socket(socket_type: libc::c_int, protocol: libc::c_int, ip: IpAddr, port: u16) -> Result<libc::c_int, SocketError> {
 
    let family = socket_family_from_ip(ip);
 
    unsafe {
 
        let socket_handle = socket(family, socket_type, protocol);
 
        if socket_handle < 0 {
 
            return Err(SocketError::Opening);
 
        }
 

	
 
        let result = match ip {
 
            IpAddr::V4(ip) => {
 
                let (socket_address, address_size) = create_sockaddr_in_v4(ip, port);
 
                let socket_pointer = &socket_address as *const sockaddr_in;
 
                connect(socket_handle, socket_pointer.cast(), address_size)
 
            },
 
            IpAddr::V6(ip) => {
 
                let (socket_address, address_size) = create_sockaddr_in_v6(ip, port);
 
                let socket_pointer= &socket_address as *const sockaddr_in6;
 
                connect(socket_handle, socket_pointer.cast(), address_size)
 
            }
 
        };
 
        if result < 0 {
 
            close(socket_handle);
 
            return Err(SocketError::Connecting);
 
        }
 

	
 
        return Ok(socket_handle);
 
    }
 
}
 

	
 
#[inline]
 
fn create_sockaddr_in_empty() -> (sockaddr_in, libc::socklen_t) {
 
    let socket_address = sockaddr_in{
 
        sin_family: 0,
 
        sin_port: 0,
 
        sin_addr: in_addr { s_addr: 0 },
 
        sin_zero: [0; 8],
 
    };
 
    let address_size = size_of::<sockaddr_in>();
 

	
 
    return (socket_address, address_size as _);
 
}
 
#[inline]
 
fn create_sockaddr_in_v4(ip: Ipv4Addr, port: u16) -> (sockaddr_in, libc::socklen_t) {
 
    let address = unsafe{
 
        in_addr{
 
            s_addr: std::mem::transmute(ip.octets())
 
        }
 
    };
 

	
 
    let socket_address = sockaddr_in{
 
        sin_family: libc::AF_INET as libc::sa_family_t,
 
        sin_port: htons(port),
 
        sin_addr: address,
 
        sin_zero: [0; 8]
 
    };
 
    let address_size = size_of::<sockaddr_in>();
 

	
 
    return (socket_address, address_size as _);
 
}
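
Filling in the `sockaddr_in` above involves two byte-order conversions: the port goes through `htons` (host-to-network order), and the transmuted octet array is already in network order because `Ipv4Addr::octets` returns the bytes as they appear on the wire. A safe, standalone sketch of equivalent conversions (the helper names are hypothetical):

use std::net::Ipv4Addr;

fn port_to_network_order(port: u16) -> u16 {
    return port.to_be(); // no-op on big-endian hosts, byte swap on little-endian
}

fn s_addr_from(ip: Ipv4Addr) -> u32 {
    // The octets are already big-endian, so reinterpreting them in native
    // layout matches the transmute used above.
    return u32::from_ne_bytes(ip.octets());
}

fn main() {
    assert_eq!(port_to_network_order(0x1234).to_ne_bytes(), [0x12, 0x34]);
    assert_eq!(s_addr_from(Ipv4Addr::new(127, 0, 0, 1)).to_ne_bytes(), [127, 0, 0, 1]);
}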
 

	
 
#[inline]
 
fn create_sockaddr_in_v6(ip: Ipv6Addr, port: u16) -> (sockaddr_in6, libc::socklen_t) {
 
    // According to RFC 6437, the flow label should be a (somewhat secure)
 
    // random number drawn from a uniform distribution
 
    let flow_info = rand::random();
src/runtime2/tests/internet.rs
Show inline comments
 
use super::*;
 

	
 
// silly test to make sure that the PDL will never be an issue when doing TCP
 
// stuff with the actual components
 
#[test]
 
fn test_stdlib_file() {
 
    compile_and_create_component("
 
    import std.internet as inet;
 

	
 
    comp fake_listener_once(out<inet::TcpConnection> tx) {
 
        channel cmd_tx -> cmd_rx;
 
        channel data_tx -> data_rx;
 
        new fake_socket(cmd_rx, data_tx);
 
        sync put(tx, inet::TcpConnection{
 
            tx: cmd_tx,
 
            rx: data_rx,
 
        });
 
    }
 

	
 
    comp fake_socket(in<inet::ClientCmd> cmds, out<u8[]> tx) {
 
        auto to_send = {};
 

	
 
        auto shutdown = false;
 
        while (!shutdown) {
 
            auto keep_going = true;
 
            sync {
 
                while (keep_going) {
 
                    auto cmd = get(cmds);
 
                    if (let inet::ClientCmd::Send(data) = cmd) {
 
                        to_send = data;
 
                        keep_going = false;
 
                    } else if (let inet::ClientCmd::Receive = cmd) {
 
                        put(tx, to_send);
 
                    } else if (let inet::ClientCmd::Finish = cmd) {
 
                        keep_going = false;
 
                    } else if (let inet::ClientCmd::Shutdown = cmd) {
 
                        keep_going = false;
 
                        shutdown = true;
 
                    }
 
                }
 
            }
 
        }
 
    }
 

	
 
    comp fake_client(inet::TcpConnection conn) {
 
        sync put(conn.tx, inet::ClientCmd::Send({1, 3, 3, 7}));
 
        sync {
 
            put(conn.tx, inet::ClientCmd::Receive);
 
            auto val = get(conn.rx);
 
            while (val[0] != 1 || val[1] != 3 || val[2] != 3 || val[3] != 7) {
 
                print(\"this is going very wrong\");
 
            }
 
            put(conn.tx, inet::ClientCmd::Finish);
 
        }
 
        sync put(conn.tx, inet::ClientCmd::Shutdown);
 
    }
 

	
 
    comp constructor() {
 
        channel conn_tx -> conn_rx;
 
        new fake_listener_once(conn_tx);
 

	
 
        // Same crap as before:
 
        channel cmd_tx -> unused_cmd_rx;
 
        channel unused_data_tx -> data_rx;
 
        auto connection = inet::TcpConnection{ tx: cmd_tx, rx: data_rx };
 

	
 
        sync {
 
            connection = get(conn_rx);
 
        }
 

	
 
        new fake_client(connection);
 
    }
 
    ", "constructor", no_args());
 
}
 

	
 
#[test]
 
fn test_tcp_listener_and_client() {
 
    compile_and_create_component("
 
    import std.internet::*;
 

	
 
    func listen_port() -> u16 {
 
        return 2393;
 
        return 2392;
 
    }
 

	
 
    comp server(u32 num_connections) {
 
    comp server(u32 num_connections, in<()> shutdown) {
 
        // Start tcp listener
 
        channel listen_cmd_tx -> listen_cmd_rx;
 
        channel listen_conn_tx -> listen_conn_rx;
 
        new tcp_listener({}, listen_port(), listen_cmd_rx, listen_conn_tx);
 

	
 
        // Fake channels such that we can create a dummy connection variable
 
        channel client_cmd_tx -> unused_client_cmd_rx;
 
        channel unused_client_data_tx -> client_data_rx;
 
        auto new_connection = TcpConnection{
 
            tx: client_cmd_tx,
 
            rx: client_data_rx,
 
        };
 

	
 
        auto connection_counter = 0;
 
        while (connection_counter < num_connections) {
 
            // Wait until we get a connection
 
            print(\"server: waiting for an accepted connection\");
 
            sync {
 
                put(listen_cmd_tx, ListenerCmd::Accept);
 
                new_connection = get(listen_conn_rx);
 
            }
 

	
 
            // We have a new connection, spawn an 'echoer' for it
 
            print(\"server: spawning an echo'ing component\");
 
            new echo_machine(new_connection);
 
            connection_counter += 1;
 
        }
 

	
 
        // Shut down the listener
 
        print(\"server: shutting down listener\");
 
        sync auto v = get(shutdown);
 
        sync put(listen_cmd_tx, ListenerCmd::Shutdown);
 
    }
 

	
 
    // Waits for a single TCP byte (to simplify potentially having to
 
    // concatenate requests) and echoes it
 
    comp echo_machine(TcpConnection conn) {
 
        auto data_to_echo = {};
 

	
 
        // Wait for a message
 
        sync {
 
            print(\"echo: receiving data\");
 
            put(conn.tx, ClientCmd::Receive);
 
            data_to_echo = get(conn.rx);
 
            put(conn.tx, ClientCmd::Finish);
 
        }
 

	
 
        // Echo the message
 
        print(\"echo: sending back data\");
 
        sync put(conn.tx, ClientCmd::Send(data_to_echo));
 

	
 
        // Ask the tcp connection to shut down
 
        print(\"echo: shutting down\");
 
        sync put(conn.tx, ClientCmd::Shutdown);
 
    }
 

	
 
    comp echo_requester(u8 byte_to_send) {
 
    comp echo_requester(u8 byte_to_send, out<()> done) {
 
        channel cmd_tx -> cmd_rx;
 
        channel data_tx -> data_rx;
 
        new tcp_client({127, 0, 0, 1}, listen_port(), cmd_rx, data_tx);
 

	
 
        // Send the message
 
        print(\"requester: sending bytes\");
 
        sync put(cmd_tx, ClientCmd::Send({ byte_to_send }));
 

	
 
        // Receive the echo'd byte
 
        auto received_byte = byte_to_send + 1;
 
        sync {
 
            print(\"requester: receiving echo response\");
 
            put(cmd_tx, ClientCmd::Receive);
 
            received_byte = get(data_rx)[0];
 
            put(cmd_tx, ClientCmd::Finish);
 
        }
 

	
 
        // Silly check, as always
 
        while (byte_to_send != received_byte) {
 
            print(\"requester: Oh no! The echo is an otherworldly distorter\");
 
        }
 

	
 
        // Shut down the TCP connection
 
        print(\"requester: shutting down TCP component\");
 
        sync put(cmd_tx, ClientCmd::Shutdown);
 
        sync put(done, ());
 
    }
 

	
 
    comp constructor() {
 
        auto num_connections = 1;
 
        new server(num_connections);
 
        channel shutdown_listener_tx -> shutdown_listener_rx;
 
        new server(num_connections, shutdown_listener_rx);
 

	
 
        auto connection_index = 0;
 
        auto all_done = {};
 
        while (connection_index < num_connections) {
 
            new echo_requester(cast(connection_index));
 
            channel done_tx -> done_rx;
 
            new echo_requester(cast(connection_index), done_tx);
 
            connection_index += 1;
 
            all_done @= {done_rx};
 
        }
 

	
 
        auto counter = 0;
 
        while (counter < length(all_done)) {
 
            print(\"constructor: waiting for requester to exit\");
 
            sync auto v = get(all_done[counter]);
 
            counter += 1;
 
        }
 

	
 
        print(\"constructor: instructing listener to exit\");
 
        sync put(shutdown_listener_tx, ());
 
    }
 
    ", "constructor", no_args());
 
}
 
\ No newline at end of file
src/runtime2/tests/mod.rs
Show inline comments
 
use crate::protocol::*;
 
use crate::protocol::eval::*;
 
use crate::runtime2::runtime::*;
 
use crate::runtime2::component::{CompCtx, CompPDL};
 

	
 
mod messaging;
 
mod error_handling;
 
mod transfer_ports;
 
mod internet;
 

	
 
const LOG_LEVEL: LogLevel = LogLevel::Debug;
 
const NUM_THREADS: u32 = 1;
 

	
 
pub(crate) fn compile_and_create_component(source: &str, routine_name: &str, args: ValueGroup) {
 
    let protocol = ProtocolDescription::parse(source.as_bytes())
 
        .expect("successful compilation");
 
    let runtime = Runtime::new(NUM_THREADS, LOG_LEVEL, protocol)
 
    let runtime = Runtime::new(NUM_THREADS, LogLevel::None, protocol)
 
        .expect("successful runtime startup");
 
    create_component(&runtime, "", routine_name, args);
 
}
 

	
 
pub(crate) fn create_component(rt: &Runtime, module_name: &str, routine_name: &str, args: ValueGroup) {
 
    let prompt = rt.inner.protocol.new_component(
 
        module_name.as_bytes(), routine_name.as_bytes(), args
 
    ).expect("create prompt");
 
    let reserved = rt.inner.start_create_component();
 
    let ctx = CompCtx::new(&reserved);
 
    let component = Box::new(CompPDL::new(prompt, 0));
 
    let (key, _) = rt.inner.finish_create_component(reserved, component, ctx, false);
 
    rt.inner.enqueue_work(key);
 
}
 

	
 
pub(crate) fn no_args() -> ValueGroup { ValueGroup::new_stack(Vec::new()) }
 

	
 
#[test]
 
fn test_component_creation() {
 
    let pd = ProtocolDescription::parse(b"
 
    comp nothing_at_all() {
 
        s32 a = 5;
 
        auto b = 5 + a;
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(1, LOG_LEVEL, pd).unwrap();
 

	
 
    for _i in 0..20 {
 
        create_component(&rt, "", "nothing_at_all", no_args());
 
    }
 
}
 

	
 
#[test]
 
fn test_simple_select() {
 
    let pd = ProtocolDescription::parse(b"
 
    func infinite_assert<T>(T val, T expected) -> () {
 
        while (val != expected) { print(\"nope!\"); }
 
        return ();
 
    }
 

	
 
    comp receiver(in<u32> in_a, in<u32> in_b, u32 num_sends) {
 
        auto num_from_a = 0;
 
        auto num_from_b = 0;
 
        while (num_from_a + num_from_b < 2 * num_sends) {
 
            sync select {
 
                auto v = get(in_a) -> {
 
                    print(\"got something from A\");
 
                    auto _ = infinite_assert(v, num_from_a);
 
                    num_from_a += 1;
 
                }
 
                auto v = get(in_b) -> {
 
                    print(\"got something from B\");
 
                    auto _ = infinite_assert(v, num_from_b);
 
                    num_from_b += 1;
 
                }
 
            }
 
        }
 
    }
 

	
 
    comp sender(out<u32> tx, u32 num_sends) {
 
        auto index = 0;
 
        while (index < num_sends) {
 
            sync {
 
                put(tx, index);
 
                index += 1;
 
            }
 
        }
 
    }
 

	
 
    comp constructor() {
 
        auto num_sends = 1;
 
        channel tx_a -> rx_a;
 
        channel tx_b -> rx_b;
 
        new sender(tx_a, num_sends);
 
        new receiver(rx_a, rx_b, num_sends);
 
        new sender(tx_b, num_sends);
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(3, LOG_LEVEL, pd).unwrap();
 
    create_component(&rt, "", "constructor", no_args());
 
}
 

	
 
#[test]
 
fn test_unguarded_select() {
 
    let pd = ProtocolDescription::parse(b"
 
    comp constructor_outside_select() {
 
        u32 index = 0;
 
        while (index < 5) {
 
            sync select { auto v = () -> print(\"hello\"); }
 
            index += 1;
 
        }
 
    }
 

	
 
    comp constructor_inside_select() {
 
        u32 index = 0;
 
        while (index < 5) {
 
            sync select { auto v = () -> index += 1; }
 
        }
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(3, LOG_LEVEL, pd).unwrap();
 
    create_component(&rt, "", "constructor_outside_select", no_args());
 
    create_component(&rt, "", "constructor_inside_select", no_args());
 
}
 

	
 
#[test]
 
fn test_empty_select() {
 
    let pd = ProtocolDescription::parse(b"
 
    comp constructor() {
 
        u32 index = 0;
 
        while (index < 5) {
 
            sync select {}
 
            index += 1;
 
        }
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(3, LOG_LEVEL, pd).unwrap();
 
    create_component(&rt, "", "constructor", no_args());
 
}
 

	
 
#[test]
 
fn test_random_u32_temporary_thingo() {
 
    let pd = ProtocolDescription::parse(b"
 
    import std.random::random_u32;
 

	
 
    comp random_taker(in<u32> generator, u32 num_values) {
 
        auto i = 0;
 
        while (i < num_values) {
 
            sync {
 
                auto a = get(generator);
 
            }
 
            i += 1;
 
        }
 
    }
 

	
 
    comp constructor() {
 
        channel tx -> rx;
 
        auto num_values = 25;
 
        new random_u32(tx, 1, 100, num_values);
 
        new random_taker(rx, num_values);
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(1, LOG_LEVEL, pd).unwrap();
 
    create_component(&rt, "", "constructor", no_args());
 
}
 

	
 
#[test]
 
fn test_tcp_socket_http_request() {
 
    let _pd = ProtocolDescription::parse(b"
 
    import std.internet::*;
 

	
 
    comp requester(out<ClientCmd> cmd_tx, in<u8[]> data_rx) {
 
        print(\"*** TCPSocket: Sending request\");
 
        sync {
 
            put(cmd_tx, ClientCmd::Send(b\"GET / HTTP/1.1\\r\\n\\r\\n\"));
 
        }
 

	
 
        print(\"*** TCPSocket: Receiving response\");
 
        auto buffer = {};
 
        auto done_receiving = false;
 
        sync while (!done_receiving) {
 
            put(cmd_tx, ClientCmd::Receive);
 
            auto data = get(data_rx);
 
            buffer @= data;
 

	
 
            // Completely crap detection of end-of-document. But here we go, we
 
            // try to detect the trailing </html>. Proper way would be to parse
 
            // for 'content-length' or 'content-encoding'
 
            s32 index = 0;
 
            s32 partial_length = cast(length(data) - 7);
 
            while (index < partial_length) {
 
                // No string conversion yet, so check byte buffer one byte at
 
                // a time.
 
                auto c1 = data[index];
 
                if (c1 == cast('<')) {
 
                    auto c2 = data[index + 1];
 
                    auto c3 = data[index + 2];
 
                    auto c4 = data[index + 3];
 
                    auto c5 = data[index + 4];
 
                    auto c6 = data[index + 5];
 
                    auto c7 = data[index + 6];
 
                    if ( // i.e. if (data[index..] == '</html>')
 
                        c2 == cast('/') && c3 == cast('h') && c4 == cast('t') &&
 
                        c5 == cast('m') && c6 == cast('l') && c7 == cast('>')
 
                    ) {
 
                        print(\"*** TCPSocket: Detected </html>\");
 
                        put(cmd_tx, ClientCmd::Finish);
 
                        done_receiving = true;
 
                    }
 
                }
 
                index += 1;
 
            }