Changeset - a4ae7002404f
MH - 2022-04-21 16:55:59
contact@maxhenger.nl
Handle post-ClosePort errors
5 files changed with 74 insertions and 15 deletions:
src/protocol/eval/executor.rs
 
@@ -113,193 +113,193 @@ impl Frame {
 
                self.serialize_expression(heap, expr.test);
 
            },
 
            Expression::Binary(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Unary(expr) => {
 
                self.serialize_expression(heap, expr.expression);
 
            },
 
            Expression::Indexing(expr) => {
 
                self.serialize_expression(heap, expr.index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Slicing(expr) => {
 
                self.serialize_expression(heap, expr.from_index);
 
                self.serialize_expression(heap, expr.to_index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Select(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Literal(expr) => {
 
                // Here we only care about literals that have subexpressions
 
                match &expr.value {
 
                    Literal::Null | Literal::True | Literal::False |
 
                    Literal::Character(_) | Literal::Bytestring(_) | Literal::String(_) |
 
                    Literal::Integer(_) | Literal::Enum(_) => {
 
                        // No subexpressions
 
                    },
 
                    Literal::Struct(literal) => {
 
                        // Note: field expressions are evaluated in programmer-
 
                        // specified order. But struct construction expects them
 
                        // in type-defined order. I might want to come back to
 
                        // this.
 
                        let mut _num_pushed = 0;
 
                        for want_field_idx in 0..literal.fields.len() {
 
                            for field in &literal.fields {
 
                                if field.field_idx == want_field_idx {
 
                                    _num_pushed += 1;
 
                                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                                    self.serialize_expression(heap, field.value);
 
                                }
 
                            }
 
                        }
 
                        debug_assert_eq!(_num_pushed, literal.fields.len())
 
                    },
 
                    Literal::Union(literal) => {
 
                        for value_expr_id in &literal.values {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Array(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Tuple(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    }
 
                }
 
            },
 
            Expression::Cast(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            }
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(_expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
 

	
 
pub type EvalResult = Result<EvalContinuation, EvalError>;
 

	
 
#[derive(Debug)]
 
pub enum EvalContinuation {
 
    // Returned in both sync and non-sync modes
 
    Stepping,
 
    // Returned only in sync mode
 
    BranchInconsistent,
 
    SyncBlockEnd,
 
    NewFork,
 
    BlockFires(PortId),
 
    BlockGet(ExpressionId, PortId),
 
    Put(ExpressionId, PortId, ValueGroup),
 
    SelectStart(u32, u32), // (num_cases, num_ports_total)
 
-    SelectRegisterPort(u32, u32, PortId), // (case_index, port_index_in_case, port_id)
 
+    SelectRegisterPort(ExpressionId, u32, u32, PortId), // (call_expr_id, case_index, port_index_in_case, port_id)
 
    SelectWait, // wait until select can continue
 
    // Returned only in non-sync mode
 
    ComponentTerminated,
 
    SyncBlockStart,
 
    NewComponent(ProcedureDefinitionId, TypeId, ValueGroup),
 
    NewChannel,
 
}
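
// A minimal sketch of how a caller driving `Prompt::step` might dispatch on the
// returned continuation. Only the select-related variants touched by this
// changeset are spelled out; the handling described in the comments is an
// assumption for illustration, not the actual runtime implementation.
fn sketch_handle_continuation(continuation: EvalContinuation) {
    match continuation {
        EvalContinuation::Stepping => {
            // nothing special happened: simply call `Prompt::step` again
        },
        EvalContinuation::SelectStart(_num_cases, _num_ports_total) => {
            // set up bookkeeping for the select block that is about to register its ports
        },
        EvalContinuation::SelectRegisterPort(_call_expr_id, _case_index, _port_index, _port_id) => {
            // register the port for the given case; the added `_call_expr_id` lets the
            // runtime point an error (e.g. the peer closed the port) at the original
            // `get` expression in the source
        },
        EvalContinuation::SelectWait => {
            // park the component until the runtime has picked a runnable case
        },
        _ => {
            // remaining variants (put/get, forking, component creation, ...) elided
        },
    }
}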
 

	
 
// Note: cloning is fine, methinks. By cloning all values and the heap regions
 
// we end up with valid "pointers" to heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
    pub fn new(types: &TypeTable, heap: &Heap, def: ProcedureDefinitionId, type_id: TypeId, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        // Maybe do typechecking in the future?
 
        let monomorph_index = types.get_monomorph(type_id).variant.as_procedure().monomorph_index;
 
        let new_frame = Frame::new(heap, def, type_id, monomorph_index);
 
        let max_stack_size = new_frame.max_stack_size;
 
        prompt.frames.push(new_frame);
 
        args.into_store(&mut prompt.store);
 
        prompt.store.reserve_stack(max_stack_size);
 

	
 
        prompt
 
    }
 

	
 
    /// Big 'ol function right here. Didn't want to break it up unnecessarily.
 
    /// It consists of, in sequence: executing any expressions that should be
 
    /// executed before the next statement can be evaluated, then a section that
 
    /// performs debug printing, and finally a section that takes the next
 
    /// statement and executes it. If the statement requires any expressions to
 
    /// be evaluated, then they will be added such that the next time `step` is
 
    /// called, all of these expressions are indeed evaluated.
 
    pub(crate) fn step(&mut self, types: &TypeTable, heap: &Heap, modules: &[Module], ctx: &mut impl RunContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the transformation first (because Rust...)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
 

	
 
        // Helper functions to check whether an index into an array is out of bounds.
 
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx >= array_len as i64;
 
        }
 

	
 
        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx > array_len as i64;
 
        }
 

	
 
        fn construct_array_error(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, heap_pos: u32, idx: i64) -> EvalError {
 
            let array_len = prompt.store.heap_regions[heap_pos as usize].values.len();
 
            return EvalError::new_error_at_expr(
 
                prompt, modules, heap, expr_id,
 
                format!("index {} is out of bounds: array length is {}", idx, array_len)
 
            )
 
        }
 

	
 
        // Checking if we're at the end of execution
 
        let cur_frame = self.frames.last_mut().unwrap();
 
        if cur_frame.position.is_invalid() {
 
            if heap[cur_frame.definition].kind == ProcedureKind::Function {
 
                todo!("End of function without return, return an evaluation error");
 
            }
 
            return Ok(EvalContinuation::ComponentTerminated);
 
        }
 

	
 
        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier.value.as_str());
 

	
 
        // Execute all pending expressions
 
        while !cur_frame.expr_stack.is_empty() {
 
            let next = cur_frame.expr_stack.pop_back().unwrap();
 
@@ -664,193 +664,193 @@ impl Prompt {
 
                                    let port_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let port_value_deref = self.store.maybe_read_ref(&port_value).clone();
 
                                    let port_id = port_value_deref.as_port_id();
 

	
 
                                    match ctx.fires(port_id) {
 
                                        None => {
 
                                            cur_frame.expr_values.push_front(port_value);
 
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
 
                                            return Ok(EvalContinuation::BlockFires(port_id));
 
                                        },
 
                                        Some(value) => {
 
                                            cur_frame.expr_values.push_back(value);
 
                                        }
 
                                    }
 
                                },
 
                                Method::Create => {
 
                                    let length_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let length_value = self.store.maybe_read_ref(&length_value);
 
                                    let length = if length_value.is_signed_integer() {
 
                                        let length_value = length_value.as_signed_integer();
 
                                        if length_value < 0 {
 
                                            return Err(EvalError::new_error_at_expr(
 
                                                self, modules, heap, expr_id,
 
                                                format!("got length '{}', can only create a message with a non-negative length", length_value)
 
                                            ));
 
                                        }
 

	
 
                                        length_value as u64
 
                                    } else {
 
                                        debug_assert!(length_value.is_unsigned_integer());
 
                                        length_value.as_unsigned_integer()
 
                                    };
 

	
 
                                    let heap_pos = self.store.alloc_heap();
 
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
 
                                    debug_assert!(values.is_empty());
 
                                    values.resize(length as usize, Value::UInt8(0));
 
                                    cur_frame.expr_values.push_back(Value::Message(heap_pos));
 
                                },
 
                                Method::Length => {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value_heap_pos = value.get_heap_pos();
 
                                    let value = self.store.maybe_read_ref(&value);
 

	
 
                                    let heap_pos = match value {
 
                                        Value::Array(pos) => *pos,
 
                                        Value::String(pos) => *pos,
 
                                        _ => unreachable!("length(...) on {:?}", value),
 
                                    };
 

	
 
                                    let len = self.store.heap_regions[heap_pos as usize].values.len();
 

	
 
                                    // TODO: @PtrInt
 
                                    cur_frame.expr_values.push_back(Value::UInt32(len as u32));
 
                                    self.store.drop_value(value_heap_pos);
 
                                },
 
                                Method::Assert => {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value = self.store.maybe_read_ref(&value).clone();
 
                                    if !value.as_bool() {
 
                                        return Ok(EvalContinuation::BranchInconsistent)
 
                                    }
 
                                },
 
                                Method::Print => {
 
                                    // Convert the runtime-variant of a string
 
                                    // into an actual string.
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let mut is_literal_string = value.get_heap_pos().is_some();
 
                                    let value = self.store.maybe_read_ref(&value);
 
                                    let value_heap_pos = value.as_string();
 
                                    let elements = &self.store.heap_regions[value_heap_pos as usize].values;
 

	
 
                                    let mut message = String::with_capacity(elements.len());
 
                                    for element in elements {
 
                                        message.push(element.as_char());
 
                                    }
 

	
 
                                    // Drop the heap-allocated value from the
 
                                    // store
 
                                    if is_literal_string {
 
                                        self.store.drop_heap_pos(value_heap_pos);
 
                                    }
 

	
 
                                    println!("{}", message);
 
                                },
 
                                Method::SelectStart => {
 
                                    let num_cases = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
 
                                    let num_ports = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
 

	
 
                                    return Ok(EvalContinuation::SelectStart(num_cases, num_ports));
 
                                },
 
                                Method::SelectRegisterCasePort => {
 
                                    let case_index = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
 
                                    let port_index = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
 
                                    let port_value = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_port_id();
 

	
 
-                                    return Ok(EvalContinuation::SelectRegisterPort(case_index, port_index, port_value));
 
+                                    return Ok(EvalContinuation::SelectRegisterPort(expr_id, case_index, port_index, port_value));
 
                                },
 
                                Method::SelectWait => {
 
                                    match ctx.performed_select_wait() {
 
                                        Some(select_index) => {
 
                                            cur_frame.expr_values.push_back(Value::UInt32(select_index));
 
                                        },
 
                                        None => {
 
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr.this.upcast()));
 
                                            return Ok(EvalContinuation::SelectWait)
 
                                        },
 
                                    }
 
                                },
 
                                Method::ComponentRandomU32 | Method::ComponentTcpClient => {
 
                                    debug_assert_eq!(heap[expr.procedure].parameters.len(), cur_frame.expr_values.len());
 
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this);
 
                                },
 
                                Method::UserComponent => {
 
                                    // This is actually handled by the evaluation
 
                                    // of the statement.
 
                                    debug_assert_eq!(heap[expr.procedure].parameters.len(), cur_frame.expr_values.len());
 
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this);
 
                                },
 
                                Method::UserFunction => {
 
                                    // Push a new frame. Note that all expressions have
 
                                    // been pushed to the front, so they're in the order
 
                                    // of the definition.
 
                                    let num_args = expr.arguments.len();
 

	
 
                                    // Determine stack boundaries
 
                                    let cur_stack_boundary = self.store.cur_stack_boundary;
 
                                    let new_stack_boundary = self.store.stack.len();
 

	
 
                                    // Push new boundary and function arguments for new frame
 
                                    self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
 
                                    for _ in 0..num_args {
 
                                        let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
 
                                        self.store.stack.push(argument);
 
                                    }
 

	
 
                                    // Determine the monomorph index of the function we're calling
 
                                    let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
 
                                    let (type_id, monomorph_index) = mono_data.expr_info[expr.type_index as usize].variant.as_procedure();
 

	
 
                                    // Push the new frame and reserve its stack size
 
                                    let new_frame = Frame::new(heap, expr.procedure, type_id, monomorph_index);
 
                                    let new_stack_size = new_frame.max_stack_size;
 
                                    self.frames.push(new_frame);
 
                                    self.store.cur_stack_boundary = new_stack_boundary;
 
                                    self.store.reserve_stack(new_stack_size);
 

	
 
                                    // To simplify the logic a little bit we will now
 
                                    // return and ask our caller to call us again
 
                                    return Ok(EvalContinuation::Stepping);
 
                                }
 
                            }
 
                        },
 
                        Expression::Variable(expr) => {
 
                            let variable = &heap[expr.declaration.unwrap()];
 
                            let ref_value = if expr.used_as_binding_target {
 
                                Value::Binding(variable.unique_id_in_scope as StackPos)
 
                            } else {
 
                                Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos))
 
                            };
 
                            cur_frame.expr_values.push_back(ref_value);
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_log!("Frame [{:?}] at {:?}", cur_frame.definition, cur_frame.position);
 
        if debug_enabled!() {
 
            debug_log!("Expression value stack (size = {}):", cur_frame.expr_values.len());
 
            for (_stack_idx, _stack_val) in cur_frame.expr_values.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Stack (size = {}):", self.store.stack.len());
 
            for (_stack_idx, _stack_val) in self.store.stack.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Heap:");
 
            for (_heap_idx, _heap_region) in self.store.heap_regions.iter().enumerate() {
 
                let _is_free = self.store.free_regions.iter().any(|idx| *idx as usize == _heap_idx);
 
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", _heap_idx, !_is_free, _heap_region.values.len(), &_heap_region.values);
 
            }
 
        }
 
        // No (more) expressions to evaluate. So evaluate statement (that may
 
        // depend on the result of the last evaluated expression(s))
 
        let stmt = &heap[cur_frame.position];
 
        let return_value = match stmt {
 
            Statement::Block(stmt) => {
 
                debug_assert!(stmt.statements.is_empty() || stmt.next == stmt.statements[0]);
 
                cur_frame.position = stmt.next;
 
                Ok(EvalContinuation::Stepping)
src/protocol/parser/pass_rewriting.rs
 
@@ -124,353 +124,361 @@ impl Visitor for PassRewriting {
 
            let case = &ctx.heap[select_id].cases[case_index];
 
            let case_guard_id = case.guard;
 
            let case_body_id = case.body;
 
            let case_scope_id = case.scope;
 

	
 
            // Create the if-statement for the result of the select statement
 
            let compare_expr_id = create_ast_equality_comparison_expr(ctx, containing_procedure_id, select_var_id, select_var_type_id, case_index as u64);
 
            let true_case = IfStatementCase{
 
                body: case_guard_id, // which is linked up to the body
 
                scope: case_scope_id,
 
            };
 
            let (if_stmt_id, end_if_stmt_id) = create_ast_if_stmt(ctx, compare_expr_id.upcast(), true_case, None);
 

	
 
            // Link up body statement to end-if
 
            set_ast_statement_next(ctx, case_body_id, end_if_stmt_id.upcast());
 

	
 
            return (if_stmt_id, end_if_stmt_id, case_scope_id);
 
        }
 

	
 
        // Precreate the block that will end up containing all of the
 
        // transformed statements. Also precreate the scope associated with it
 
        let (outer_block_id, outer_end_block_id, outer_scope_id) =
 
            create_ast_block_stmt(ctx, Vec::new());
 

	
 
        // The "select" and the "end select" statement will act like trampolines
 
        // that jump to the replacement block. So set the child/parent
 
        // relationship already.
 
        // --- for the statements
 
        let select_stmt = &mut ctx.heap[id];
 
        select_stmt.next = outer_block_id.upcast();
 
        let end_select_stmt_id = select_stmt.end_select;
 
        let select_stmt_relative_pos = select_stmt.relative_pos_in_parent;
 

	
 
        let outer_end_block_stmt = &mut ctx.heap[outer_end_block_id];
 
        outer_end_block_stmt.next = end_select_stmt_id.upcast();
 

	
 
        // --- for the scopes
 
        link_new_child_to_existing_parent_scope(ctx, &mut self.scope_buffer, self.current_scope, outer_scope_id, select_stmt_relative_pos);
 

	
 
        // Create statements that will create temporary variables for all of the
 
        // ports passed to the "get" calls in the select case guards.
 
        let select_stmt = &ctx.heap[id];
 
        let total_num_cases = select_stmt.cases.len();
 
        let mut total_num_ports = 0;
 
        let end_select_stmt_id = select_stmt.end_select;
 
        let _end_select = &ctx.heap[end_select_stmt_id];
 

	
 
        // Put heap IDs into temporary buffers to handle borrowing rules
 
        let mut call_id_section = self.call_expr_buffer.start_section();
 
        let mut expr_id_section = self.expression_buffer.start_section();
 

	
 
        for case in select_stmt.cases.iter() {
 
            total_num_ports += case.involved_ports.len();
 
            for (call_id, expr_id) in case.involved_ports.iter().copied() {
 
                call_id_section.push(call_id);
 
                expr_id_section.push(expr_id);
 
            }
 
        }
 

	
 
        // Transform each of the call expressions by taking its argument (the
 
        // port from which we `get`) and turning it into a temporary variable.
 
        let mut transformed_stmts = Vec::with_capacity(total_num_ports); // TODO: Recompute this preallocated length, put assert at the end
 
        let mut locals = Vec::with_capacity(total_num_ports);
 

	
 
        for port_var_idx in 0..call_id_section.len() {
 
            let get_call_expr_id = call_id_section[port_var_idx];
 
            let port_expr_id = expr_id_section[port_var_idx];
 
            let port_type_index = ctx.heap[port_expr_id].type_index();
 
            let port_type_ref = TypeIdReference::IndirectSameAsExpr(port_type_index);
 

	
 
            // Move the port expression such that it gets assigned to a temporary variable
 
            let variable_id = create_ast_variable(ctx, outer_scope_id);
 
            let variable_decl_stmt_id = create_ast_variable_declaration_stmt(ctx, self.current_procedure_id, variable_id, port_type_ref, port_expr_id);
 

	
 
            // Replace the original port expression in the call with a reference
 
            // to the replacement variable
 
            let variable_expr_id = create_ast_variable_expr(ctx, self.current_procedure_id, variable_id, port_type_ref);
 
            let call_expr = &mut ctx.heap[get_call_expr_id];
 
            call_expr.arguments[0] = variable_expr_id.upcast();
 

	
 
            transformed_stmts.push(variable_decl_stmt_id.upcast().upcast());
 
            locals.push((variable_id, port_type_ref));
 
        }
 

	
 
        // Insert runtime calls that facilitate the semantics of the select
 
        // block.
 

	
 
        // Create the call that indicates the start of the select block
 
        {
 
            let num_cases_expression_id = create_ast_literal_integer_expr(ctx, self.current_procedure_id, total_num_cases as u64, ctx.arch.uint32_type_id);
 
            let num_ports_expression_id = create_ast_literal_integer_expr(ctx, self.current_procedure_id, total_num_ports as u64, ctx.arch.uint32_type_id);
 
            let arguments = vec![
 
                num_cases_expression_id.upcast(),
 
                num_ports_expression_id.upcast()
 
            ];
 

	
 
-            let call_expression_id = create_ast_call_expr(ctx, self.current_procedure_id, Method::SelectStart, &mut self.expression_buffer, arguments);
 
+            let call_expression_id = create_ast_call_expr(ctx, InputSpan::new(), self.current_procedure_id, Method::SelectStart, &mut self.expression_buffer, arguments);
 
            let call_statement_id = create_ast_expression_stmt(ctx, call_expression_id.upcast());
 

	
 
            transformed_stmts.push(call_statement_id.upcast());
 
        }
 

	
 
        // Create calls for each select case that will register the ports that
 
            // we are waiting on with the runtime.
 
        {
 
            let mut total_port_index = 0;
 
            for case_index in 0..total_num_cases {
 
                let case = &ctx.heap[id].cases[case_index];
 
                let case_num_ports = case.involved_ports.len();
 

	
 
                for case_port_index in 0..case_num_ports {
 
                    // Arguments to runtime call
 
                    let original_get_call_id = case.involved_ports[case_port_index].0;
 
                    let original_get_call_span = ctx.heap[original_get_call_id].full_span;
 

	
 
                    let (port_variable_id, port_variable_type) = locals[total_port_index]; // at this point `locals` only contains the temporary variables for the port expressions
 
                    let case_index_expr_id = create_ast_literal_integer_expr(ctx, self.current_procedure_id, case_index as u64, ctx.arch.uint32_type_id);
 
                    let port_index_expr_id = create_ast_literal_integer_expr(ctx, self.current_procedure_id, case_port_index as u64, ctx.arch.uint32_type_id);
 
                    let port_variable_expr_id = create_ast_variable_expr(ctx, self.current_procedure_id, port_variable_id, port_variable_type);
 
                    let runtime_call_arguments = vec![
 
                        case_index_expr_id.upcast(),
 
                        port_index_expr_id.upcast(),
 
                        port_variable_expr_id.upcast()
 
                    ];
 

	
 
                    // Create runtime call, then store it
 
-                    let runtime_call_expr_id = create_ast_call_expr(ctx, self.current_procedure_id, Method::SelectRegisterCasePort, &mut self.expression_buffer, runtime_call_arguments);
 
+                    let runtime_call_expr_id = create_ast_call_expr(ctx, original_get_call_span, self.current_procedure_id, Method::SelectRegisterCasePort, &mut self.expression_buffer, runtime_call_arguments);
 
                    let runtime_call_stmt_id = create_ast_expression_stmt(ctx, runtime_call_expr_id.upcast());
 

	
 
                    transformed_stmts.push(runtime_call_stmt_id.upcast());
 

	
 
                    total_port_index += 1;
 
                }
 
            }
 
        }
 

	
 
        // Create the variable that will hold the result of a completed select
 
        // block. Then create the runtime call that will produce this result
 
        let select_variable_id = create_ast_variable(ctx, outer_scope_id);
 
        let select_variable_type = TypeIdReference::DirectTypeId(ctx.arch.uint32_type_id);
 
        locals.push((select_variable_id, select_variable_type));
 

	
 
        {
 
-            let runtime_call_expr_id = create_ast_call_expr(ctx, self.current_procedure_id, Method::SelectWait, &mut self.expression_buffer, Vec::new());
 
+            let runtime_call_expr_id = create_ast_call_expr(ctx, InputSpan::new(), self.current_procedure_id, Method::SelectWait, &mut self.expression_buffer, Vec::new());
 
            let variable_stmt_id = create_ast_variable_declaration_stmt(ctx, self.current_procedure_id, select_variable_id, select_variable_type, runtime_call_expr_id.upcast());
 
            transformed_stmts.push(variable_stmt_id.upcast().upcast());
 
        }
 

	
 
        call_id_section.forget();
 
        expr_id_section.forget();
 

	
 
        // Now we transform each select case's guard and body into
 
        // a chained if-else statement.
 
        let relative_pos = transformed_stmts.len() as i32;
 
        if total_num_cases > 0 {
 
            let (if_stmt_id, end_if_stmt_id, scope_id) = transform_select_case_code(ctx, self.current_procedure_id, id, 0, select_variable_id, select_variable_type);
 
            link_existing_child_to_new_parent_scope(ctx, &mut self.scope_buffer, outer_scope_id, scope_id, relative_pos);
 
            let first_end_if_stmt = &mut ctx.heap[end_if_stmt_id];
 
            first_end_if_stmt.next = outer_end_block_id.upcast();
 

	
 
            let mut last_if_stmt_id = if_stmt_id;
 
            let mut last_end_if_stmt_id = end_if_stmt_id;
 
            let mut last_parent_scope_id = outer_scope_id;
 
            let mut last_relative_pos = transformed_stmts.len() as i32 + 1;
 
            transformed_stmts.push(last_if_stmt_id.upcast());
 

	
 
            for case_index in 1..total_num_cases {
 
                let (if_stmt_id, end_if_stmt_id, scope_id) = transform_select_case_code(ctx, self.current_procedure_id, id, case_index, select_variable_id, select_variable_type);
 
                let false_case_scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::If(last_if_stmt_id, false)));
 
                link_existing_child_to_new_parent_scope(ctx, &mut self.scope_buffer, false_case_scope_id, scope_id, 0);
 
                link_new_child_to_existing_parent_scope(ctx, &mut self.scope_buffer, last_parent_scope_id, false_case_scope_id, last_relative_pos);
 
                set_ast_if_statement_false_body(ctx, last_if_stmt_id, last_end_if_stmt_id, IfStatementCase{ body: if_stmt_id.upcast(), scope: false_case_scope_id });
 

	
 
                let end_if_stmt = &mut ctx.heap[end_if_stmt_id];
 
                end_if_stmt.next = last_end_if_stmt_id.upcast();
 

	
 
                last_if_stmt_id = if_stmt_id;
 
                last_end_if_stmt_id = end_if_stmt_id;
 
                last_parent_scope_id = false_case_scope_id;
 
                last_relative_pos = 0;
 
            }
 
        }
 

	
 
        // Final steps: set the statements of the replacement block statement,
 
        // link all of those statements together, and update the scopes.
 
        let first_stmt_id = transformed_stmts[0];
 
        let mut last_stmt_id = transformed_stmts[0];
 
        for stmt_id in transformed_stmts.iter().skip(1).copied() {
 
            set_ast_statement_next(ctx, last_stmt_id, stmt_id);
 
            last_stmt_id = stmt_id;
 
        }
 

	
 
        if total_num_cases == 0 {
 
            // If we don't have any cases, then we didn't connect the statements
 
            // up to the end of the outer block, so do that here
 
            set_ast_statement_next(ctx, last_stmt_id, outer_end_block_id.upcast());
 
        }
 

	
 
        let outer_block_stmt = &mut ctx.heap[outer_block_id];
 
        outer_block_stmt.next = first_stmt_id;
 
        outer_block_stmt.statements = transformed_stmts;
 

	
 
        return Ok(())
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Utilities to create compiler-generated AST nodes
 
// -----------------------------------------------------------------------------
 

	
 
#[derive(Clone, Copy)]
 
enum TypeIdReference {
 
    DirectTypeId(TypeId),
 
    IndirectSameAsExpr(i32), // by type index
 
}
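
// A minimal sketch of how such a reference might be resolved to a concrete
// `TypeId` once the expression types of a procedure monomorph are known; the
// `type_id_of_expr_index` callback stands in for the actual lookup and is an
// assumption for illustration only.
fn sketch_resolve_type_id(
    reference: TypeIdReference,
    type_id_of_expr_index: impl Fn(i32) -> TypeId,
) -> TypeId {
    match reference {
        // the type is known directly
        TypeIdReference::DirectTypeId(type_id) => type_id,
        // the type is whatever was inferred for the expression at this type index
        TypeIdReference::IndirectSameAsExpr(type_index) => type_id_of_expr_index(type_index),
    }
}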
 

	
 
fn create_ast_variable(ctx: &mut Ctx, scope_id: ScopeId) -> VariableId {
 
    let variable_id = ctx.heap.alloc_variable(|this| Variable{
 
        this,
 
        kind: VariableKind::Local,
 
        parser_type: ParserType{
 
            elements: Vec::new(),
 
            full_span: InputSpan::new(),
 
        },
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        relative_pos_in_parent: -1,
 
        unique_id_in_scope: -1,
 
    });
 
    let scope = &mut ctx.heap[scope_id];
 
    scope.variables.push(variable_id);
 

	
 
    return variable_id;
 
}
 

	
 
fn create_ast_variable_expr(ctx: &mut Ctx, containing_procedure_id: ProcedureDefinitionId, variable_id: VariableId, variable_type_id: TypeIdReference) -> VariableExpressionId {
 
    let variable_type_index = add_new_procedure_expression_type(ctx, containing_procedure_id, variable_type_id);
 
    return ctx.heap.alloc_variable_expression(|this| VariableExpression{
 
        this,
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        declaration: Some(variable_id),
 
        used_as_binding_target: false,
 
        parent: ExpressionParent::None,
 
        type_index: variable_type_index,
 
    });
 
}
 

	
 
-fn create_ast_call_expr(ctx: &mut Ctx, containing_procedure_id: ProcedureDefinitionId, method: Method, buffer: &mut ScopedBuffer<ExpressionId>, arguments: Vec<ExpressionId>) -> CallExpressionId {
+/// Creates an AST call expression. The provided expression span is useful for
+/// the cases where we perform compiler-builtin function calls that, when they
+/// fail, should provide an error pointing at a specific point in the source
+/// code. The `containing_procedure_id` is the procedure whose instructions are
+/// going to contain this new call expression.
+fn create_ast_call_expr(ctx: &mut Ctx, span: InputSpan, containing_procedure_id: ProcedureDefinitionId, method: Method, buffer: &mut ScopedBuffer<ExpressionId>, arguments: Vec<ExpressionId>) -> CallExpressionId {
 
    let call_type_id = match method {
 
        Method::SelectStart => ctx.arch.void_type_id,
 
        Method::SelectRegisterCasePort => ctx.arch.void_type_id,
 
        Method::SelectWait => ctx.arch.uint32_type_id, // TODO: Not pretty, this. Pretty error prone
 
        _ => unreachable!(), // if this goes off, add the appropriate method here.
 
    };
 

	
 
    let expression_ids = buffer.start_section_initialized(&arguments);
 
    let call_type_index = add_new_procedure_expression_type(ctx, containing_procedure_id, TypeIdReference::DirectTypeId(call_type_id));
 
    let call_expression_id = ctx.heap.alloc_call_expression(|this| CallExpression{
 
        func_span: InputSpan::new(),
 
        this,
 
-        full_span: InputSpan::new(),
 
+        full_span: span,
 
        parser_type: ParserType{
 
            elements: Vec::new(),
 
            full_span: InputSpan::new(),
 
        },
 
        method,
 
        arguments,
 
        procedure: ProcedureDefinitionId::new_invalid(),
 
        parent: ExpressionParent::None,
 
        type_index: call_type_index,
 
    });
 

	
 
    for argument_index in 0..expression_ids.len() {
 
        let argument_id = expression_ids[argument_index];
 
        let argument_expr = &mut ctx.heap[argument_id];
 
        *argument_expr.parent_mut() = ExpressionParent::Expression(call_expression_id.upcast(), argument_index as u32);
 
    }
 

	
 
    return call_expression_id;
 
}
 

	
 
fn create_ast_literal_integer_expr(ctx: &mut Ctx, containing_procedure_id: ProcedureDefinitionId, unsigned_value: u64, type_id: TypeId) -> LiteralExpressionId {
 
    let literal_type_index = add_new_procedure_expression_type(ctx, containing_procedure_id, TypeIdReference::DirectTypeId(type_id));
 
    return ctx.heap.alloc_literal_expression(|this| LiteralExpression{
 
        this,
 
        span: InputSpan::new(),
 
        value: Literal::Integer(LiteralInteger{
 
            unsigned_value,
 
            negated: false,
 
        }),
 
        parent: ExpressionParent::None,
 
        type_index: literal_type_index,
 
    });
 
}
 

	
 
fn create_ast_equality_comparison_expr(
 
    ctx: &mut Ctx, containing_procedure_id: ProcedureDefinitionId,
 
    variable_id: VariableId, variable_type: TypeIdReference, value: u64
 
) -> BinaryExpressionId {
 
    let var_expr_id = create_ast_variable_expr(ctx, containing_procedure_id, variable_id, variable_type);
 
    let int_expr_id = create_ast_literal_integer_expr(ctx, containing_procedure_id, value, ctx.arch.uint32_type_id);
 
    let cmp_type_index = add_new_procedure_expression_type(ctx, containing_procedure_id, TypeIdReference::DirectTypeId(ctx.arch.bool_type_id));
 
    let cmp_expr_id = ctx.heap.alloc_binary_expression(|this| BinaryExpression{
 
        this,
 
        operator_span: InputSpan::new(),
 
        full_span: InputSpan::new(),
 
        left: var_expr_id.upcast(),
 
        operation: BinaryOperator::Equality,
 
        right: int_expr_id.upcast(),
 
        parent: ExpressionParent::None,
 
        type_index: cmp_type_index,
 
    });
 

	
 
    let var_expr = &mut ctx.heap[var_expr_id];
 
    var_expr.parent = ExpressionParent::Expression(cmp_expr_id.upcast(), 0);
 
    let int_expr = &mut ctx.heap[int_expr_id];
 
    int_expr.parent = ExpressionParent::Expression(cmp_expr_id.upcast(), 1);
 

	
 
    return cmp_expr_id;
 
}
 

	
 
fn create_ast_expression_stmt(ctx: &mut Ctx, expression_id: ExpressionId) -> ExpressionStatementId {
 
    let statement_id = ctx.heap.alloc_expression_statement(|this| ExpressionStatement{
 
        this,
 
        span: InputSpan::new(),
 
        expression: expression_id,
 
        next: StatementId::new_invalid(),
 
    });
 

	
 
    let expression = &mut ctx.heap[expression_id];
 
    *expression.parent_mut() = ExpressionParent::ExpressionStmt(statement_id);
 

	
 
    return statement_id;
 
}
 

	
 
fn create_ast_variable_declaration_stmt(
 
    ctx: &mut Ctx, containing_procedure_id: ProcedureDefinitionId,
 
    variable_id: VariableId, variable_type: TypeIdReference, initial_value_expr_id: ExpressionId
 
) -> MemoryStatementId {
 
    // Create the assignment expression, assigning the initial value to the variable
 
    let variable_expr_id = create_ast_variable_expr(ctx, containing_procedure_id, variable_id, variable_type);
 
    let void_type_index = add_new_procedure_expression_type(ctx, containing_procedure_id, TypeIdReference::DirectTypeId(ctx.arch.void_type_id));
 
    let assignment_expr_id = ctx.heap.alloc_assignment_expression(|this| AssignmentExpression{
 
        this,
 
        operator_span: InputSpan::new(),
 
        full_span: InputSpan::new(),
 
        left: variable_expr_id.upcast(),
 
        operation: AssignmentOperator::Set,
 
        right: initial_value_expr_id,
 
        parent: ExpressionParent::None,
 
        type_index: void_type_index,
 
    });
 

	
 
    // Create the memory statement
 
    let memory_stmt_id = ctx.heap.alloc_memory_statement(|this| MemoryStatement{
 
        this,
 
        span: InputSpan::new(),
src/runtime2/component/component.rs
 
@@ -305,362 +305,368 @@ pub(crate) fn default_handle_incoming_data_message(
 
        debug_assert!(port_info.state == PortState::Open || port_info.state.is_blocked()); // i.e. not closed, but will go off if more states are added in the future
 

	
 
        if port_info.state == PortState::Open {
 
            comp_ctx.set_port_state(port_handle, PortState::BlockedDueToFullBuffers);
 
            let (peer_handle, message) =
 
                control.initiate_port_blocking(comp_ctx, port_handle);
 
            let peer = comp_ctx.get_peer(peer_handle);
 
            peer.handle.send_message(&sched_ctx.runtime, Message::Control(message), true);
 
        }
 

	
 
        return IncomingData::SlotFull(incoming_message)
 
    }
 
}
 

	
 
/// Default handling of a data message that has been received through a `get`.
/// Will check if any more messages are waiting and, if the corresponding port
/// was blocked because of full buffers, will use the control layer to make
/// sure the peer becomes unblocked.
 
pub(crate) fn default_handle_received_data_message(
 
    targeted_port: PortId, port_instruction: PortInstruction,
 
    slot: &mut Option<DataMessage>, inbox_backup: &mut Vec<DataMessage>,
 
    comp_ctx: &mut CompCtx, sched_ctx: &SchedulerCtx, control: &mut ControlLayer
 
) -> Result<(), (PortInstruction, String)> {
 
    debug_assert!(slot.is_none()); // because we've just received from it
 

	
 
    // Update the port's last-known instruction (where it was last used)
 
    let port_handle = comp_ctx.get_port_handle(targeted_port);
 
    let port_info = comp_ctx.get_port_mut(port_handle);
 
    port_info.last_instruction = port_instruction;
 

	
 
    if port_info.state == PortState::Closed {
 
        return Err((
 
            port_info.last_instruction,
 
            format!("Cannot 'get' because the channel is closed"))
 
        );
 
    }
 

	
 
    // Check if there are any more messages in the backup buffer
 
    let port_info = comp_ctx.get_port(port_handle);
 
    for message_index in 0..inbox_backup.len() {
 
        let message = &inbox_backup[message_index];
 
        if message.data_header.target_port == targeted_port {
 
            // One more message, place it in the slot
 
            let message = inbox_backup.remove(message_index);
 
            debug_assert!(port_info.state.is_blocked()); // since we're removing another message from the backup
 
            *slot = Some(message);
 

	
 
            return Ok(());
 
        }
 
    }
 

	
 
    // Did not have any more messages, so if we were blocked, then we need to
 
    // unblock the port now (and inform the peer of this unblocking)
 
    if port_info.state == PortState::BlockedDueToFullBuffers {
 
        comp_ctx.set_port_state(port_handle, PortState::Open);
 
        let (peer_handle, message) = control.cancel_port_blocking(comp_ctx, port_handle);
 
        let peer_info = comp_ctx.get_peer(peer_handle);
 
        peer_info.handle.send_message(&sched_ctx.runtime, Message::Control(message), true);
 
    }
 

	
 
    return Ok(());
 
}
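
// A minimal sketch of how a builtin component might use the helper above right
// after taking a message out of its inbox slot: an error tuple (for instance
// produced because the peer already sent a `ClosePort`) is handed to
// `default_handle_error_for_builtin` further below, which puts the component on
// the exit path. The function name and the use of `PortInstruction::None` are
// illustrative simplifications.
fn sketch_builtin_get(
    exec_state: &mut CompExecState, control: &mut ControlLayer,
    comp_ctx: &mut CompCtx, sched_ctx: &SchedulerCtx, port_id: PortId,
    slot: &mut Option<DataMessage>, inbox_backup: &mut Vec<DataMessage>
) {
    if let Err(location_and_message) = default_handle_received_data_message(
        port_id, PortInstruction::None, slot, inbox_backup, comp_ctx, sched_ctx, control
    ) {
        default_handle_error_for_builtin(exec_state, sched_ctx, location_and_message);
    }
}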
 

	
 
/// Handles control messages in the default way. Note that this function may
 
/// take a lot of actions in the name of the caller: pending messages may be
 
/// sent, ports may become blocked/unblocked, etc. So the execution
 
/// (`CompExecState`), control (`ControlLayer`) and consensus (`Consensus`)
 
/// state may all change.
 
pub(crate) fn default_handle_control_message(
 
    exec_state: &mut CompExecState, control: &mut ControlLayer, consensus: &mut Consensus,
 
    message: ControlMessage, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx
 
) -> Result<(), (PortInstruction, String)> {
 
    match message.content {
 
        ControlMessageContent::Ack => {
 
            default_handle_ack(control, message.id, sched_ctx, comp_ctx);
 
        },
 
        ControlMessageContent::BlockPort(port_id) => {
 
            // One of our messages was accepted, but the port should be
 
            // blocked.
 
            let port_handle = comp_ctx.get_port_handle(port_id);
 
            let port_info = comp_ctx.get_port(port_handle);
 
            debug_assert_eq!(port_info.kind, PortKind::Putter);
 
            if port_info.state == PortState::Open {
 
                // only when open: we don't do this when closed, and we don't do this if we're blocked due to peer changes
 
                comp_ctx.set_port_state(port_handle, PortState::BlockedDueToFullBuffers);
 
            }
 
        },
 
        ControlMessageContent::ClosePort(content) => {
 
            // Request to close the port. We immediately comply and remove
 
            // the component handle as well
 
            let port_handle = comp_ctx.get_port_handle(content.port_to_close);
 

	
 
            // We're closing the port, so we will always update the peer of the
 
            // port (in case of error messages)
 
            let port_info = comp_ctx.get_port_mut(port_handle);
 
            port_info.peer_comp_id = message.sender_comp_id;
 
            port_info.close_at_sync_end = true; // might be redundant (we might set it closed now)
 

	
 
            let port_info = comp_ctx.get_port(port_handle);
 
            let peer_comp_id = port_info.peer_comp_id;
 
            let peer_handle = comp_ctx.get_peer_handle(peer_comp_id);
 

	
 
            // One exception to sending an `Ack` is if we just closed the
 
            // port ourselves, meaning that the two `ClosePort` messages
 
            // crossed each other in transit.
 
            if let Some(control_id) = control.has_close_port_entry(port_handle, comp_ctx) {
 
                // The two components (sender and this component) are closing
 
                // the channel at the same time.
 
                default_handle_ack(control, control_id, sched_ctx, comp_ctx);
 
            } else {
 
                // Respond to the message
 
                let last_instruction = port_info.last_instruction;
 
                let port_was_used = last_instruction != PortInstruction::None;
 
                default_send_ack(message.id, peer_handle, sched_ctx, comp_ctx);
 
                comp_ctx.remove_peer(sched_ctx, port_handle, peer_comp_id, false); // do not remove if closed
 
                comp_ctx.set_port_state(port_handle, PortState::Closed); // now set to closed
 

	
 
                // Make sure that we've not reached an error condition. Note
 
                // that if this condition is not met, then we don't error out
 
                // now, but we may error out in the next sync block when we
 
                // try to `put`/`get` on the port. This condition makes sure
 
                // that if we have a successful sync round, followed by the peer
 
                // closing the port, we don't consider the sync round to
 
                // have failed by mistake.
 
-                let error_due_to_port_use =
+                if content.closed_in_sync_round && exec_state.mode.is_in_sync_block() && port_was_used {
 
                    return Err((
 
                        last_instruction,
 
                        format!("Peer component (id:{}) shut down, so previous communication cannot have succeeded", peer_comp_id.0)
 
                    ));
 
                }
 
            }
 
        },
 
        ControlMessageContent::UnblockPort(port_id) => {
 
            // We were previously blocked (or already closed)
 
            let port_handle = comp_ctx.get_port_handle(port_id);
 
            let port_info = comp_ctx.get_port(port_handle);
 
            debug_assert_eq!(port_info.kind, PortKind::Putter);
 
            if port_info.state == PortState::BlockedDueToFullBuffers {
 
                default_handle_unblock_put(exec_state, consensus, port_handle, sched_ctx, comp_ctx);
 
            }
 
        },
 
        ControlMessageContent::PortPeerChangedBlock(port_id) => {
 
            // The peer of our port has just changed. So we are asked to
 
            // temporarily block the port (while our original recipient is
 
            // potentially rerouting some of the in-flight messages) and
 
            // Ack. Then we wait for the `unblock` call.
 
            debug_assert_eq!(message.target_port_id, Some(port_id));
 
            let port_handle = comp_ctx.get_port_handle(port_id);
 
            comp_ctx.set_port_state(port_handle, PortState::BlockedDueToPeerChange);
 

	
 
            let port_info = comp_ctx.get_port(port_handle);
 
            let peer_handle = comp_ctx.get_peer_handle(port_info.peer_comp_id);
 

	
 
            default_send_ack(message.id, peer_handle, sched_ctx, comp_ctx);
 
        },
 
        ControlMessageContent::PortPeerChangedUnblock(new_port_id, new_comp_id) => {
 
            let port_handle = comp_ctx.get_port_handle(message.target_port_id.unwrap());
 
            let port_info = comp_ctx.get_port(port_handle);
 
            debug_assert!(port_info.state == PortState::BlockedDueToPeerChange);
 
            let old_peer_id = port_info.peer_comp_id;
 

	
 
            comp_ctx.remove_peer(sched_ctx, port_handle, old_peer_id, false);
 

	
 
            let port_info = comp_ctx.get_port_mut(port_handle);
 
            port_info.peer_comp_id = new_comp_id;
 
            port_info.peer_port_id = new_port_id;
 
            comp_ctx.add_peer(port_handle, sched_ctx, new_comp_id, None);
 
            default_handle_unblock_put(exec_state, consensus, port_handle, sched_ctx, comp_ctx);
 
        }
 
    }
 

	
 
    return Ok(());
 
}
 

	
 
/// Handles a component initiating the exiting procedure, and closing all of its
 
/// ports. Should only be called once per component (which is ensured by
 
/// checking and modifying the mode in the execution state).
 
#[must_use]
 
pub(crate) fn default_handle_start_exit(
 
    exec_state: &mut CompExecState, control: &mut ControlLayer,
 
    sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, consensus: &mut Consensus
 
) -> CompScheduling {
 
    debug_assert_eq!(exec_state.mode, CompMode::StartExit);
 
    sched_ctx.log("Component starting exit");
 
    exec_state.mode = CompMode::BusyExit;
 
    let exit_inside_sync = exec_state.exit_reason.is_in_sync();
 

	
 
    // If exiting while inside sync mode, report to the leader of the current
 
    // round that we've failed.
 
    if exit_inside_sync {
 
        let decision = consensus.notify_sync_end_failure(sched_ctx, comp_ctx);
 
-        default_handle_sync_decision(sched_ctx, exec_state, decision, consensus);
+        default_handle_sync_decision(sched_ctx, exec_state, comp_ctx, decision, consensus);
 
    }
 

	
 
    // Iterating over ports by index to work around borrowing rules
 
    for port_index in 0..comp_ctx.num_ports() {
 
        let port = comp_ctx.get_port_by_index_mut(port_index);
 
        if port.state == PortState::Closed {
 
            // Already closed, or in the process of being closed
 
            continue;
 
        }
 

	
 
        // Mark as closed
 
        let port_id = port.self_id;
 
        port.state = PortState::Closed;
 

	
 
        // Notify peer of closing
 
        let port_handle = comp_ctx.get_port_handle(port_id);
 
        let (peer, message) = control.initiate_port_closing(port_handle, exit_inside_sync, comp_ctx);
 
        let peer_info = comp_ctx.get_peer(peer);
 
        peer_info.handle.send_message(&sched_ctx.runtime, Message::Control(message), true);
 
    }
 

	
 
    return CompScheduling::Immediate; // to check if we can shut down immediately
 
}
 

	
 
/// Handles a component waiting until all peers are notified that it is quitting
 
/// (i.e. after calling `default_handle_start_exit`).
 
#[must_use]
 
pub(crate) fn default_handle_busy_exit(
 
    exec_state: &mut CompExecState, control: &ControlLayer,
 
    sched_ctx: &SchedulerCtx
 
) -> CompScheduling {
 
    debug_assert_eq!(exec_state.mode, CompMode::BusyExit);
 
    if control.has_acks_remaining() {
 
        sched_ctx.log("Component busy exiting, still has `Ack`s remaining");
 
        return CompScheduling::Sleep;
 
    } else {
 
        sched_ctx.log("Component busy exiting, now shutting down");
 
        exec_state.mode = CompMode::Exit;
 
        return CompScheduling::Exit;
 
    }
 
}
 

	
 
/// Handles a potential synchronous round decision. If there was a decision then
 
/// the `Some(success)` value indicates whether the round succeeded or not.
 
/// Might also end up changing the `ExecState`.
 
///
 
/// Might be called in two cases:
 
/// 1. The component is in regular execution mode, at the end of a sync round,
 
///     and is waiting for a solution to the round.
 
/// 2. The component has encountered an error during a sync round and is
 
///     exiting, hence is waiting for a "Failure" message from the leader.
 
pub(crate) fn default_handle_sync_decision(
 
-    sched_ctx: &SchedulerCtx, exec_state: &mut CompExecState,
 
+    sched_ctx: &SchedulerCtx, exec_state: &mut CompExecState, comp_ctx: &mut CompCtx,
 
    decision: SyncRoundDecision, consensus: &mut Consensus
 
) -> Option<bool> {
 
    let success = match decision {
 
        SyncRoundDecision::None => return None,
 
        SyncRoundDecision::Solution => true,
 
        SyncRoundDecision::Failure => false,
 
    };
 

	
 
    debug_assert!(
 
        exec_state.mode == CompMode::SyncEnd || (
 
            exec_state.mode.is_busy_exiting() && exec_state.exit_reason.is_error()
 
        ) || (
 
            exec_state.mode.is_in_sync_block() && decision == SyncRoundDecision::Failure
 
        )
 
    );
 

	
 
    sched_ctx.log(&format!("Handling decision {:?} (in mode: {:?})", decision, exec_state.mode));
 
    consensus.notify_sync_decision(decision);
 
    if success {
 
        // We cannot get a success message if the component has encountered an
 
        // error.
 
        for port_index in 0..comp_ctx.num_ports() {
 
            let port_info = comp_ctx.get_port_by_index_mut(port_index);
 
            if port_info.close_at_sync_end {
 
                port_info.state = PortState::Closed;
 
            }
 
        }
 
        debug_assert_eq!(exec_state.mode, CompMode::SyncEnd);
 
        exec_state.mode = CompMode::NonSync;
 
        return Some(true);
 
    } else {
 
        // We may get a failure in all of the possible cases. But we should only
 
        // modify the execution state if we're not already in exit mode
 
        if !exec_state.mode.is_busy_exiting() {
 
            sched_ctx.error("failed synchronous round, initiating exit");
 
            exec_state.set_as_start_exit(ExitReason::ErrorNonSync);
 
        }
 
        return Some(false);
 
    }
 
}
 

	
 
/// Performs the default action of printing the provided error, and then putting
 
/// the component in the state where it will shut down. Only to be used for
 
/// builtin components: their error message construction is simpler (and more
 
/// common) as they don't have any source code.
 
pub(crate) fn default_handle_error_for_builtin(
 
    exec_state: &mut CompExecState, sched_ctx: &SchedulerCtx,
 
    location_and_message: (PortInstruction, String)
 
) {
 
    let (_location, message) = location_and_message;
 
    sched_ctx.error(&message);
 

	
 
    let exit_reason = if exec_state.mode.is_in_sync_block() {
 
        ExitReason::ErrorInSync
 
    } else {
 
        ExitReason::ErrorNonSync
 
    };
 

	
 
    exec_state.set_as_start_exit(exit_reason);
 
}
 

	
 
#[inline]
 
pub(crate) fn default_handle_exit(_exec_state: &CompExecState) -> CompScheduling {
 
    debug_assert_eq!(_exec_state.mode, CompMode::Exit);
 
    return CompScheduling::Exit;
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Internal messaging/state utilities
 
// -----------------------------------------------------------------------------
 

	
 
/// Handles an `Ack` for the control layer.
 
fn default_handle_ack(
 
    control: &mut ControlLayer, control_id: ControlId,
 
    sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx
 
) {
 
    // Since an `Ack` may cause another one, handle them in a loop
 
    let mut to_ack = control_id;
 
    loop {
 
        let (action, new_to_ack) = control.handle_ack(to_ack, sched_ctx, comp_ctx);
 
        match action {
 
            AckAction::SendMessage(target_comp, message) => {
 
                // FIX @NoDirectHandle
 
                let mut handle = sched_ctx.runtime.get_component_public(target_comp);
 
                handle.send_message(&sched_ctx.runtime, Message::Control(message), true);
 
                let _should_remove = handle.decrement_users();
 
                debug_assert!(_should_remove.is_none());
 
            },
 
            AckAction::ScheduleComponent(to_schedule) => {
 
                // FIX @NoDirectHandle
 
                let mut handle = sched_ctx.runtime.get_component_public(to_schedule);
 

	
 
                // Note that the component is intentionally not marked as
 
                // sleeping, so we can enqueue it for execution directly
 
                debug_assert!(!handle.sleeping.load(std::sync::atomic::Ordering::Acquire));
 
                let key = unsafe { to_schedule.upgrade() };
 
                sched_ctx.runtime.enqueue_work(key);
 
                let _should_remove = handle.decrement_users();
 
                debug_assert!(_should_remove.is_none());
 
            },
 
            AckAction::None => {}
 
        }
 

	
 
        match new_to_ack {
 
            Some(new_to_ack) => to_ack = new_to_ack,
 
            None => break,
 
        }
 
    }
 
}
 

	
 
/// Little helper for sending the most common kind of `Ack`
 
fn default_send_ack(
 
    causer_of_ack_id: ControlId, peer_handle: LocalPeerHandle,
 
    sched_ctx: &SchedulerCtx, comp_ctx: &CompCtx
 
) {
 
    let peer_info = comp_ctx.get_peer(peer_handle);
 
    peer_info.handle.send_message(&sched_ctx.runtime, Message::Control(ControlMessage{
 
        id: causer_of_ack_id,
 
        sender_comp_id: comp_ctx.id,
 
        target_port_id: None,
 
        content: ControlMessageContent::Ack
 
    }), true);
 
}
src/runtime2/component/component_context.rs
 
use crate::runtime2::scheduler::*;
 
use crate::runtime2::runtime::*;
 
use crate::runtime2::communication::*;
 

	
 
use crate::protocol::ExpressionId;
 

	
 
/// Helper type that remembers where the last operation on a port took place.
 
#[derive(Debug, PartialEq, Copy, Clone)]
 
pub enum PortInstruction {
 
    None,
 
    NoSource,
 
    SourceLocation(ExpressionId),
 
}
 

	
 
#[derive(Debug)]
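 
/// A port as tracked by this component. `last_instruction` records where the
 
/// port was last used, so that an error can be reported at that source
 
/// location if the port turns out to be closed; `close_at_sync_end` defers
 
/// marking the port as closed until the current sync round has ended.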
 
pub struct Port {
 
    pub self_id: PortId,
 
    pub peer_comp_id: CompId, // eventually consistent
 
    pub peer_port_id: PortId, // eventually consistent
 
    pub kind: PortKind,
 
    pub state: PortState,
 
    pub last_instruction: PortInstruction, // used during a sync round for error reporting, in case the port ends up being closed
 
    pub close_at_sync_end: bool, // used during sync round when receiving a `ClosePort(not_in_sync_round)` message
 
    #[cfg(debug_assertions)] pub(crate) associated_with_peer: bool,
 
}
 

	
 
pub struct Peer {
 
    pub id: CompId,
 
    pub num_associated_ports: u32,
 
    pub(crate) handle: CompHandle,
 
}
 

	
 
/// Port and peer management structure. Keeps a local reference count of the
 
/// ports associated with each peer; additionally manages the atomic reference
 
/// counter associated with the peers' component handles.
 
pub struct CompCtx {
 
    pub id: CompId,
 
    ports: Vec<Port>,
 
    peers: Vec<Peer>,
 
    port_id_counter: u32,
 
}
 

	
 
#[derive(Copy, Clone, PartialEq, Eq)]
 
pub struct LocalPortHandle(PortId);
 

	
 
#[derive(Copy, Clone)]
 
pub struct LocalPeerHandle(CompId);
 

	
 
impl CompCtx {
 
    /// Creates a new component context based on a reserved entry in the
 
    /// component store. This reservation is used such that we already know our
 
    /// assigned ID.
 
    pub(crate) fn new(reservation: &CompReserved) -> Self {
 
        return Self{
 
            id: reservation.id(),
 
            ports: Vec::new(),
 
            peers: Vec::new(),
 
            port_id_counter: 0,
 
        }
 
    }
 

	
 
    /// Creates a new channel that is fully owned by the component associated
 
    /// with this context.
 
    pub(crate) fn create_channel(&mut self) -> Channel {
 
        let putter_id = PortId(self.take_port_id());
 
        let getter_id = PortId(self.take_port_id());
 
        self.ports.push(Port{
 
            self_id: putter_id,
 
            peer_port_id: getter_id,
 
            kind: PortKind::Putter,
 
            state: PortState::Open,
 
            peer_comp_id: self.id,
 
            last_instruction: PortInstruction::None,
 
            close_at_sync_end: false,
 
            #[cfg(debug_assertions)] associated_with_peer: false,
 
        });
 
        self.ports.push(Port{
 
            self_id: getter_id,
 
            peer_port_id: putter_id,
 
            kind: PortKind::Getter,
 
            state: PortState::Open,
 
            peer_comp_id: self.id,
 
            last_instruction: PortInstruction::None,
 
            close_at_sync_end: false,
 
            #[cfg(debug_assertions)] associated_with_peer: false,
 
        });
 

	
 
        return Channel{ putter_id, getter_id };
 
    }
 

	
 
    /// Adds a new port. Make sure to call `add_peer` afterwards.
 
    pub(crate) fn add_port(&mut self, peer_comp_id: CompId, peer_port_id: PortId, kind: PortKind, state: PortState) -> LocalPortHandle {
 
        let self_id = PortId(self.take_port_id());
 
        self.ports.push(Port{
 
            self_id, peer_comp_id, peer_port_id, kind, state,
 
            last_instruction: PortInstruction::None,
 
            close_at_sync_end: false,
 
            #[cfg(debug_assertions)] associated_with_peer: false,
 
        });
 
        return LocalPortHandle(self_id);
 
    }
 

	
 
    /// Removes a port. Make sure you called `remove_peer` first.
 
    pub(crate) fn remove_port(&mut self, port_handle: LocalPortHandle) -> Port {
 
        let port_index = self.must_get_port_index(port_handle);
 
        let port = self.ports.remove(port_index);
 
        dbg_code!(assert!(!port.associated_with_peer));
 
        return port;
 
    }
 

	
 
    /// Adds a new peer. This must be called for every port, regardless of which
 
    /// component the channel is connected to. If a `CompHandle` is supplied,
 
    /// then it will be used to add the peer. Otherwise it will be retrieved
 
    /// from the runtime using its ID.
 
    pub(crate) fn add_peer(&mut self, port_handle: LocalPortHandle, sched_ctx: &SchedulerCtx, peer_comp_id: CompId, handle: Option<&CompHandle>) {
 
        let self_id = self.id;
 
        let port = self.get_port_mut(port_handle);
 
        debug_assert_eq!(port.peer_comp_id, peer_comp_id);
 
        dbg_code!(assert!(!port.associated_with_peer));
 
        if !Self::requires_peer_reference(port, self_id, false) {
 
            return;
 
        }
 

	
 
        dbg_code!(port.associated_with_peer = true);
 
        match self.get_peer_index_by_id(peer_comp_id) {
 
            Some(peer_index) => {
 
                let peer = &mut self.peers[peer_index];
 
                peer.num_associated_ports += 1;
 
            },
 
            None => {
 
                let handle = match handle {
 
                    Some(handle) => handle.clone(),
 
                    None => sched_ctx.runtime.get_component_public(peer_comp_id)
 
                };
 
                self.peers.push(Peer{
 
                    id: peer_comp_id,
 
                    num_associated_ports: 1,
 
                    handle,
 
                });
 
            }
 
        }
 
    }
 

	
 
    /// Removes a peer associated with a port.
 
    pub(crate) fn remove_peer(&mut self, sched_ctx: &SchedulerCtx, port_handle: LocalPortHandle, peer_id: CompId, also_remove_if_closed: bool) {
 
        let self_id = self.id;
 
        let port = self.get_port_mut(port_handle);
 
        debug_assert_eq!(port.peer_comp_id, peer_id);
 
        if !Self::requires_peer_reference(port, self_id, also_remove_if_closed) {
 
            return;
 
        }
 

	
 
        dbg_code!(assert!(port.associated_with_peer));
 
        dbg_code!(port.associated_with_peer = false);
 
        let peer_index = self.get_peer_index_by_id(peer_id).unwrap();
 
        let peer = &mut self.peers[peer_index];
 
        peer.num_associated_ports -= 1;
 
        if peer.num_associated_ports == 0 {
 
            let mut peer = self.peers.remove(peer_index);
 
            if let Some(key) = peer.handle.decrement_users() {
 
                debug_assert_ne!(key.downgrade(), self.id); // should be upheld by the code that shuts down a component
 
                sched_ctx.runtime.destroy_component(key);
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn set_port_state(&mut self, port_handle: LocalPortHandle, new_state: PortState) {
 
        let port_info = self.get_port_mut(port_handle);
 
        debug_assert_ne!(port_info.state, PortState::Closed); // because then we do not expect to change the state
 
        port_info.state = new_state;
 
    }
 

	
 
    pub(crate) fn get_port_handle(&self, port_id: PortId) -> LocalPortHandle {
 
        return LocalPortHandle(port_id);
 
    }
 

	
 
    // should perhaps be revised, used in main inbox
 
    pub(crate) fn get_port_index(&self, port_handle: LocalPortHandle) -> usize {
 
        return self.must_get_port_index(port_handle);
 
    }
 

	
 
    pub(crate) fn get_peer_handle(&self, peer_id: CompId) -> LocalPeerHandle {
 
        return LocalPeerHandle(peer_id);
 
    }
 

	
 
    pub(crate) fn get_port(&self, port_handle: LocalPortHandle) -> &Port {
 
        let index = self.must_get_port_index(port_handle);
 
        return &self.ports[index];
 
    }
 

	
 
    pub(crate) fn get_port_mut(&mut self, port_handle: LocalPortHandle) -> &mut Port {
 
        let index = self.must_get_port_index(port_handle);
 
        return &mut self.ports[index];
src/runtime2/component/component_pdl.rs
 
@@ -225,367 +225,408 @@ pub(crate) struct CompPDL {
 
impl Component for CompPDL {
 
    fn on_creation(&mut self, _id: CompId, _sched_ctx: &SchedulerCtx) {
 
        // Intentionally empty
 
    }
 

	
 
    fn on_shutdown(&mut self, _sched_ctx: &SchedulerCtx) {
 
        // Intentionally empty
 
    }
 

	
 
    fn adopt_message(&mut self, comp_ctx: &mut CompCtx, message: DataMessage) {
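 
        // Place the message in the main inbox slot of its target port; if that
 
        // slot is already occupied, store it in the backup inbox instead.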
 
        let port_handle = comp_ctx.get_port_handle(message.data_header.target_port);
 
        let port_index = comp_ctx.get_port_index(port_handle);
 
        if self.inbox_main[port_index].is_none() {
 
            self.inbox_main[port_index] = Some(message);
 
        } else {
 
            self.inbox_backup.push(message);
 
        }
 
    }
 

	
 
    fn handle_message(&mut self, sched_ctx: &mut SchedulerCtx, comp_ctx: &mut CompCtx, mut message: Message) {
 
        // sched_ctx.log(&format!("handling message: {:?}", message));
 
        if let Some(new_target) = self.control.should_reroute(&mut message) {
 
            let mut target = sched_ctx.runtime.get_component_public(new_target); // TODO: @NoDirectHandle
 
            target.send_message(&sched_ctx.runtime, message, false); // not waking up: we schedule once we've received all PortPeerChanged Acks
 
            let _should_remove = target.decrement_users();
 
            debug_assert!(_should_remove.is_none());
 
            return;
 
        }
 

	
 
        match message {
 
            Message::Data(message) => {
 
                self.handle_incoming_data_message(sched_ctx, comp_ctx, message);
 
            },
 
            Message::Control(message) => {
 
                if let Err(location_and_message) = component::default_handle_control_message(
 
                    &mut self.exec_state, &mut self.control, &mut self.consensus,
 
                    message, sched_ctx, comp_ctx
 
                ) {
 
                    self.handle_generic_component_error(sched_ctx, location_and_message);
 
                }
 
            },
 
            Message::Sync(message) => {
 
                self.handle_incoming_sync_message(sched_ctx, comp_ctx, message);
 
            },
 
            Message::Poll => {
 
                unreachable!(); // because we never register at the polling thread
 
            }
 
        }
 
    }
 

	
 
    fn run(&mut self, sched_ctx: &mut SchedulerCtx, comp_ctx: &mut CompCtx) -> CompScheduling {
 
        use EvalContinuation as EC;
 

	
 
        sched_ctx.log(&format!("Running component (mode: {:?})", self.exec_state.mode));
 

	
 
        // Depending on the mode: do nothing at all, take some special
 
        // action, or fall through and run the PDL code.
 
        match self.exec_state.mode {
 
            CompMode::NonSync | CompMode::Sync => {
 
                // continue and run PDL code
 
            },
 
            CompMode::SyncEnd | CompMode::BlockedGet | CompMode::BlockedPut | CompMode::BlockedSelect => {
 
                return CompScheduling::Sleep;
 
            }
 
            CompMode::StartExit => return component::default_handle_start_exit(
 
                &mut self.exec_state, &mut self.control, sched_ctx, comp_ctx, &mut self.consensus
 
            ),
 
            CompMode::BusyExit => return component::default_handle_busy_exit(
 
                &mut self.exec_state, &self.control, sched_ctx
 
            ),
 
            CompMode::Exit => return component::default_handle_exit(&self.exec_state),
 
        }
 

	
 
        let run_result = self.execute_prompt(&sched_ctx);
 
        if let Err(error) = run_result {
 
            self.handle_component_error(sched_ctx, CompError::Executor(error));
 
            return CompScheduling::Immediate;
 
        }
 

	
 
        let run_result = run_result.unwrap();
 

	
 
        match run_result {
 
            EC::Stepping => unreachable!(), // execute_prompt runs until this is no longer returned
 
            EC::BranchInconsistent | EC::NewFork | EC::BlockFires(_) => todo!("remove these"),
 
            // Results that can be returned in sync mode
 
            EC::SyncBlockEnd => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                self.handle_sync_end(sched_ctx, comp_ctx);
 
                return CompScheduling::Immediate;
 
            },
 
            EC::BlockGet(expr_id, port_id) => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                debug_assert!(self.exec_ctx.stmt.is_none());
 

	
 
                let port_id = port_id_from_eval(port_id);
 
                let port_handle = comp_ctx.get_port_handle(port_id);
 

	
 
                let port_info = comp_ctx.get_port_mut(port_handle);
 
                port_info.last_instruction = PortInstruction::SourceLocation(expr_id);
 
                let port_is_closed = port_info.state == PortState::Closed;
 

	
 
                let port_index = comp_ctx.get_port_index(port_handle);
 
                if let Some(message) = &self.inbox_main[port_index] {
 
                    // Check if we can actually receive the message
 
                    if self.consensus.try_receive_data_message(sched_ctx, comp_ctx, message) {
 
                        // Message was received. Make sure any blocked peers and
 
                        // pending messages are handled.
 
                        let message = self.inbox_main[port_index].take().unwrap();
 
                        let receive_result = component::default_handle_received_data_message(
 
                            port_id, PortInstruction::SourceLocation(expr_id),
 
                            &mut self.inbox_main[port_index], &mut self.inbox_backup,
 
                            comp_ctx, sched_ctx, &mut self.control
 
                        );
 
                        if let Err(location_and_message) = receive_result {
 
                            self.handle_generic_component_error(sched_ctx, location_and_message);
 
                            return CompScheduling::Immediate
 
                        } else {
 
                            self.exec_ctx.stmt = ExecStmt::PerformedGet(message.content);
 
                            return CompScheduling::Immediate;
 
                        }
 
                    } else {
 
                        todo!("handle sync failure due to message deadlock");
 
                        let protocol = &sched_ctx.runtime.protocol;
 
                        self.handle_component_error(sched_ctx, CompError::Executor(EvalError::new_error_at_expr(
 
                            &self.prompt, &protocol.modules, &protocol.heap, expr_id,
 
                            String::from("Cannot get from this port, as this causes a deadlock. This happens if you `get` in a different order as another component `put`s")
 
                        ));
 
                        return CompScheduling::Sleep;
 
                    }
 
                } else if port_is_closed {
 
                    // No messages, but getting makes no sense as the port is
 
                    // closed.
 
                    let peer_id = comp_ctx.get_port(port_handle).peer_comp_id;
 
                    let protocol = &sched_ctx.runtime.protocol;
 
                    self.handle_component_error(sched_ctx, CompError::Executor(EvalError::new_error_at_expr(
 
                        &self.prompt, &protocol.modules, &protocol.heap, expr_id,
 
                        format!("Cannot get from this port, as the peer component (id:{}) shut down", peer_id.0)
 
                    )));
 
                    return CompScheduling::Immediate;
 
                } else {
 
                    // We need to wait
 
                    self.exec_state.set_as_blocked_get(port_id);
 
                    return CompScheduling::Sleep;
 
                }
 
            },
 
            EC::Put(expr_id, port_id, value) => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                sched_ctx.log(&format!("Putting value {:?}", value));
 

	
 
                // Send the message
 
                let target_port_id = port_id_from_eval(port_id);
 
                let send_result = component::default_send_data_message(
 
                    &mut self.exec_state, target_port_id,
 
                    PortInstruction::SourceLocation(expr_id), value,
 
                    sched_ctx, &mut self.consensus, comp_ctx
 
                );
 
                if let Err(location_and_message) = send_result {
 
                    self.handle_generic_component_error(sched_ctx, location_and_message);
 
                    return CompScheduling::Immediate;
 
                } else {
 
                    // When `run` is called again (potentially after becoming
 
                    // unblocked) we need to instruct the executor that we performed
 
                    // the `put`
 
                    let scheduling = send_result.unwrap();
 
                    self.exec_ctx.stmt = ExecStmt::PerformedPut;
 
                    return scheduling;
 
                }
 
            },
 
            EC::SelectStart(num_cases, _num_ports) => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                self.select_state.handle_select_start(num_cases);
 
                return CompScheduling::Requeue;
 
            },
 
            EC::SelectRegisterPort(case_index, port_index, port_id) => {
 
            EC::SelectRegisterPort(expr_id, case_index, port_index, port_id) => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                let port_id = port_id_from_eval(port_id);
 
                let port_handle = comp_ctx.get_port_handle(port_id);
 

	
 
                // Note: we register the "last_instruction" here already. This
 
                // way if we get a `ClosePort` message, the condition to fail
 
                // the synchronous round is satisfied.
 
                let port_info = comp_ctx.get_port_mut(port_handle);
 
                port_info.last_instruction = PortInstruction::SourceLocation(expr_id);
 
                let port_is_closed = port_info.state == PortState::Closed;
 

	
 
                // Register port as part of select guard
 
                if let Err(_err) = self.select_state.register_select_case_port(comp_ctx, case_index, port_index, port_id) {
 
                    todo!("handle registering a port multiple times");
 
                    // Failure occurs if a port is used twice in the same guard
 
                    let protocol = &sched_ctx.runtime.protocol;
 
                    self.handle_component_error(sched_ctx, CompError::Executor(EvalError::new_error_at_expr(
 
                        &self.prompt, &protocol.modules, &protocol.heap, expr_id,
 
                        String::from("Cannot have the one port appear in the same guard twice")
 
                    )));
 
                } else if port_is_closed {
 
                    // Port is closed
 
                    let peer_id = comp_ctx.get_port(port_handle).peer_comp_id;
 
                    let protocol = &sched_ctx.runtime.protocol;
 
                    self.handle_component_error(sched_ctx, CompError::Executor(EvalError::new_error_at_expr(
 
                        &self.prompt, &protocol.modules, &protocol.heap, expr_id,
 
                        format!("Cannot register port, as the peer component (id:{}) has shut down", peer_id.0)
 
                    )));
 
                }
 

	
 
                return CompScheduling::Immediate;
 
            },
 
            EC::SelectWait => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::Sync);
 
                let select_decision = self.select_state.handle_select_waiting_point(&self.inbox_main, comp_ctx);
 
                if let SelectDecision::Case(case_index) = select_decision {
 
                    // Reached a conclusion, so we can continue immediately
 
                    self.exec_ctx.stmt = ExecStmt::PerformedSelectWait(case_index);
 
                    self.exec_state.mode = CompMode::Sync;
 
                    return CompScheduling::Immediate;
 
                } else {
 
                    // No decision yet
 
                    self.exec_state.mode = CompMode::BlockedSelect;
 
                    return CompScheduling::Sleep;
 
                }
 
            },
 
            // Results that can be returned outside of sync mode
 
            EC::ComponentTerminated => {
 
                self.exec_state.set_as_start_exit(ExitReason::Termination);
 
                return CompScheduling::Immediate;
 
            },
 
            EC::SyncBlockStart => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::NonSync);
 
                self.handle_sync_start(sched_ctx, comp_ctx);
 
                return CompScheduling::Immediate;
 
            },
 
            EC::NewComponent(definition_id, type_id, arguments) => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::NonSync);
 
                self.create_component_and_transfer_ports(
 
                    sched_ctx, comp_ctx,
 
                    definition_id, type_id, arguments
 
                );
 
                return CompScheduling::Requeue;
 
            },
 
            EC::NewChannel => {
 
                debug_assert_eq!(self.exec_state.mode, CompMode::NonSync);
 
                debug_assert!(self.exec_ctx.stmt.is_none());
 
                let channel = comp_ctx.create_channel();
 
                self.exec_ctx.stmt = ExecStmt::CreatedChannel((
 
                    Value::Output(port_id_to_eval(channel.putter_id)),
 
                    Value::Input(port_id_to_eval(channel.getter_id))
 
                ));
 
                self.inbox_main.push(None);
 
                self.inbox_main.push(None);
 
                return CompScheduling::Immediate;
 
            }
 
        }
 
    }
 
}
 

	
 
impl CompPDL {
 
    pub(crate) fn new(initial_state: Prompt, num_ports: usize) -> Self {
 
        let mut inbox_main = Vec::new();
 
        inbox_main.reserve(num_ports);
 
        for _ in 0..num_ports {
 
            inbox_main.push(None);
 
        }
 

	
 
        return Self{
 
            exec_state: CompExecState::new(),
 
            select_state: SelectState::new(),
 
            prompt: initial_state,
 
            control: ControlLayer::default(),
 
            consensus: Consensus::new(),
 
            sync_counter: 0,
 
            exec_ctx: ExecCtx{
 
                stmt: ExecStmt::None,
 
            },
 
            inbox_main,
 
            inbox_backup: Vec::new(),
 
        }
 
    }
 

	
 
    // -------------------------------------------------------------------------
 
    // Running component and handling changes in global component state
 
    // -------------------------------------------------------------------------
 

	
 
    fn execute_prompt(&mut self, sched_ctx: &SchedulerCtx) -> EvalResult {
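 
        // Keep stepping the prompt until it yields something other than
 
        // `Stepping`; any evaluation error is propagated to the caller.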
 
        let mut step_result = EvalContinuation::Stepping;
 
        while let EvalContinuation::Stepping = step_result {
 
            step_result = self.prompt.step(
 
                &sched_ctx.runtime.protocol.types, &sched_ctx.runtime.protocol.heap,
 
                &sched_ctx.runtime.protocol.modules, &mut self.exec_ctx,
 
            )?;
 
        }
 

	
 
        return Ok(step_result)
 
    }
 

	
 
    fn handle_sync_start(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx) {
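 
        // Notify the consensus algorithm that a sync round has started and let
 
        // it inspect any data messages already present in the main inbox.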
 
        sched_ctx.log("Component starting sync mode");
 
        self.consensus.notify_sync_start(comp_ctx);
 
        for message in self.inbox_main.iter() {
 
            if let Some(message) = message {
 
                self.consensus.handle_incoming_data_message(comp_ctx, message);
 
            }
 
        }
 
        debug_assert_eq!(self.exec_state.mode, CompMode::NonSync);
 
        self.exec_state.mode = CompMode::Sync;
 
    }
 

	
 
    /// Handles end of sync. The conclusion to the sync round might arise
 
    /// immediately (and be handled immediately), or might come later through
 
    /// messaging. In any case the component should be scheduled again
 
    /// immediately
 
    fn handle_sync_end(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx) {
 
        sched_ctx.log("Component ending sync mode (now waiting for solution)");
 
        let decision = self.consensus.notify_sync_end_success(sched_ctx, comp_ctx);
 
        self.exec_state.mode = CompMode::SyncEnd;
 
        component::default_handle_sync_decision(sched_ctx, &mut self.exec_state, comp_ctx, decision, &mut self.consensus);
 
    }
 

	
 
    fn handle_component_exit(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx) {
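 
        // Transition from `StartExit` to `BusyExit`: every open port is closed
 
        // and its peer is notified of the closure.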
 
        sched_ctx.log(&format!("Component exiting (reason: {:?}", self.exec_state.exit_reason));
 
        debug_assert_eq!(self.exec_state.mode, CompMode::StartExit);
 
        self.exec_state.mode = CompMode::BusyExit;
 
        let exit_inside_sync = self.exec_state.exit_reason.is_in_sync();
 

	
 
        // Doing this by index, then retrieving the handle, is a bit ridiculous,
 
        // but Rust is being Rust with its borrowing rules.
 
        for port_index in 0..comp_ctx.num_ports() {
 
            let port = comp_ctx.get_port_by_index_mut(port_index);
 
            if port.state == PortState::Closed {
 
                // Already closed, or in the process of being closed
 
                continue;
 
            }
 

	
 
            // Mark as closed
 
            let port_id = port.self_id;
 
            port.state = PortState::Closed;
 

	
 
            // Notify peer of closing
 
            let port_handle = comp_ctx.get_port_handle(port_id);
 
            let (peer, message) = self.control.initiate_port_closing(port_handle, exit_inside_sync, comp_ctx);
 
            let peer_info = comp_ctx.get_peer(peer);
 
            peer_info.handle.send_message(&sched_ctx.runtime, Message::Control(message), true);
 
        }
 
    }
 

	
 
    // -------------------------------------------------------------------------
 
    // Handling messages
 
    // -------------------------------------------------------------------------
 

	
 
    /// Handles a message that came in through the public inbox. This function
 
    /// will handle putting it in the correct place, and potentially blocking
 
    /// the port in case too many messages are being received.
 
    fn handle_incoming_data_message(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, message: DataMessage) {
 
        use component::IncomingData;
 

	
 
        // Whatever we do, glean information from the message headers
 
        if self.exec_state.mode.is_in_sync_block() {
 
            self.consensus.handle_incoming_data_message(comp_ctx, &message);
 
        }
 

	
 
        let port_handle = comp_ctx.get_port_handle(message.data_header.target_port);
 
        let port_index = comp_ctx.get_port_index(port_handle);
 
        match component::default_handle_incoming_data_message(
 
            &mut self.exec_state, &mut self.inbox_main[port_index], comp_ctx, message,
 
            sched_ctx, &mut self.control
 
        ) {
 
            IncomingData::PlacedInSlot => {
 
                if self.exec_state.mode == CompMode::BlockedSelect {
 
                    let select_decision = self.select_state.handle_updated_inbox(&self.inbox_main, comp_ctx);
 
                    if let SelectDecision::Case(case_index) = select_decision {
 
                        self.exec_ctx.stmt = ExecStmt::PerformedSelectWait(case_index);
 
                        self.exec_state.mode = CompMode::Sync;
 
                    }
 
                }
 
            },
 
            IncomingData::SlotFull(message) => {
 
                self.inbox_backup.push(message);
 
            }
 
        }
 
    }
 

	
 
    fn handle_incoming_sync_message(&mut self, sched_ctx: &SchedulerCtx, comp_ctx: &mut CompCtx, message: SyncMessage) {
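 
        // Forward the sync message to the consensus algorithm; if this yields a
 
        // round decision, handle it right away.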
 
        let decision = self.consensus.receive_sync_message(sched_ctx, comp_ctx, message);
 
        component::default_handle_sync_decision(sched_ctx, &mut self.exec_state, comp_ctx, decision, &mut self.consensus);
 
    }
 

	
 
    /// Handles an error coming from the generic `component::handle_xxx`
 
    /// functions; hence it accepts its argument as a tuple.
 
    fn handle_generic_component_error(&mut self, sched_ctx: &SchedulerCtx, location_and_message: (PortInstruction, String)) {
 
        // Retrieve location and message, display in terminal
 
        let (location, message) = location_and_message;
 
        let error = match location {
 
            PortInstruction::None => CompError::Component(message),
 
            PortInstruction::NoSource => unreachable!(), // for debugging: all in-sync errors are associated with a source location
 
            PortInstruction::SourceLocation(expression_id) => {
 
                let protocol = &sched_ctx.runtime.protocol;
 
                CompError::Executor(EvalError::new_error_at_expr(
 
                    &self.prompt, &protocol.modules, &protocol.heap,
 
                    expression_id, message
 
                ))
 
            }
 
        };
 

	
 
        self.handle_component_error(sched_ctx, error);
 
    }
 

	
 
    fn handle_component_error(&mut self, sched_ctx: &SchedulerCtx, error: CompError) {
 
        sched_ctx.error(&format!("{}", error));
 

	
 
        // Set state to handle subsequent error
 
        let exit_reason = if self.exec_state.mode.is_in_sync_block() {
 
            ExitReason::ErrorInSync