Changeset - 979f0c33417c
[Not reviewed]
MH - 3 years ago 2022-02-24 18:21:06
contact@maxhenger.nl
WIP: Use monomorph index lookup, prepare builtin procedures
12 files changed with 183 insertions and 136 deletions:
0 comments (0 inline, 0 general)
src/collections/string_pool.rs
 
@@ -126,97 +126,97 @@ impl StringPool {
 
        // Must fit now, compute hash and put in buffer
 
        debug_assert!(data_len <= last.remaining);
 
        let range_start = last.data.len();
 
        last.data.extend_from_slice(data);
 
        last.remaining -= data_len;
 
        debug_assert_eq!(range_start + data_len, last.data.len());
 

	
 
        unsafe {
 
            let start = last.data.as_ptr().offset(range_start as isize);
 
            StringRef{ data: start, length: data_len, _phantom: PhantomData }
 
        }
 
    }
 

	
 
    fn alloc_new_slab(&mut self) {
 
        let new_slab = Box::new(StringPoolSlab::new(self.last));
 
        let new_slab = Box::into_raw(new_slab);
 
        self.last = new_slab;
 
    }
 
}
 

	
 
impl Drop for StringPool {
 
    fn drop(&mut self) {
 
        let mut new_slab = self.last;
 
        while !new_slab.is_null() {
 
            let cur_slab = new_slab;
 
            unsafe {
 
                new_slab = (*cur_slab).prev;
 
                Box::from_raw(cur_slab); // consume and deallocate
 
            }
 
        }
 
    }
 
}
 

	
 
// String pool cannot be cloned, and the created `StringRef` instances remain
 
// allocated until the end of the program, so it is always safe to send. It is
 
// also sync in the sense that it becomes an immutable thing after compilation,
 
// but let's not derive that, in case we would ever become a multithreaded compiler in
 
// the future.
 
unsafe impl Send for StringPool {}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 

	
 
    #[test]
 
    fn display_empty_string_ref() {
 
        // Makes sure that a null pointer inside StringRef does not cause issues
 
        let v = StringRef::new_empty();
 
        let val = format!("{}{:?}", v, v);
 
        let _val = format!("{}{:?}", v, v); // calls Format and Debug on StringRef
 
    }
 

	
 
    #[test]
 
    fn test_string_just_fits() {
 
        let large = "0".repeat(SLAB_SIZE);
 
        let mut pool = StringPool::new();
 
        let interned = pool.intern(large.as_bytes());
 
        assert_eq!(interned.as_str(), large);
 
    }
 

	
 
    #[test]
 
    #[should_panic]
 
    fn test_string_too_large() {
 
        let large = "0".repeat(SLAB_SIZE + 1);
 
        let mut pool = StringPool::new();
 
        let _interned = pool.intern(large.as_bytes());
 
    }
 

	
 
    #[test]
 
    fn test_lots_of_small_allocations() {
 
        const NUM_PER_SLAB: usize = 32;
 
        const NUM_SLABS: usize = 4;
 

	
 
        let to_intern = "0".repeat(SLAB_SIZE / NUM_PER_SLAB);
 
        let mut pool = StringPool::new();
 

	
 
        let mut last_slab = pool.last;
 
        let mut all_refs = Vec::new();
 

	
 
        // Fill up first slab
 
        for _alloc_idx in 0..NUM_PER_SLAB {
 
            let interned = pool.intern(to_intern.as_bytes());
 
            all_refs.push(interned);
 
            assert!(std::ptr::eq(last_slab, pool.last));
 
        }
 

	
 
        for _slab_idx in 0..NUM_SLABS-1 {
 
            for alloc_idx in 0..NUM_PER_SLAB {
 
                let interned = pool.intern(to_intern.as_bytes());
 
                all_refs.push(interned);
 

	
 
                if alloc_idx == 0 {
 
                    // First allocation produces a new slab
 
                    assert!(!std::ptr::eq(last_slab, pool.last));
 
                    last_slab = pool.last;
 
                } else {
 
                    assert!(std::ptr::eq(last_slab, pool.last));
 
                }
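As background for the tests above: the pool keeps a singly linked chain of heap-allocated slabs behind raw pointers, handing ownership to the pool with Box::into_raw and reclaiming it slab by slab in Drop with Box::from_raw. A minimal standalone sketch of that pattern (Slab/Pool are illustrative names, not the crate's actual types):

struct Slab {
    prev: *mut Slab, // previously allocated slab; null for the first one
    data: Vec<u8>,
}

struct Pool {
    last: *mut Slab,
}

impl Pool {
    fn new() -> Self {
        Pool{ last: std::ptr::null_mut() }
    }

    fn alloc_new_slab(&mut self) {
        // The Box owns the slab; into_raw transfers that ownership to the pool.
        let slab = Box::new(Slab{ prev: self.last, data: Vec::with_capacity(4096) });
        self.last = Box::into_raw(slab);
    }
}

impl Drop for Pool {
    fn drop(&mut self) {
        // Walk the chain backwards and re-box every slab so it is deallocated.
        let mut cur = self.last;
        while !cur.is_null() {
            unsafe {
                let prev = (*cur).prev;
                drop(Box::from_raw(cur));
                cur = prev;
            }
        }
    }
}

fn main() {
    let mut pool = Pool::new();
    pool.alloc_new_slab();
    pool.alloc_new_slab();
    // Dropping the pool frees both slabs.
}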
src/protocol/ast.rs
 
@@ -762,97 +762,97 @@ impl Scope {
 
            this: id,
 
            parent: None,
 
            nested: Vec::new(),
 
            association,
 
            variables: Vec::new(),
 
            labels: Vec::new(),
 
            relative_pos_in_parent: -1,
 
            first_unique_id_in_scope: -1,
 
            next_unique_id_in_scope: -1,
 
        }
 
    }
 
}
 

	
 
impl Scope {
 
    pub(crate) fn new_invalid(this: ScopeId) -> Self {
 
        return Self{
 
            this,
 
            parent: None,
 
            nested: Vec::new(),
 
            association: ScopeAssociation::Definition(DefinitionId::new_invalid()),
 
            variables: Vec::new(),
 
            labels: Vec::new(),
 
            relative_pos_in_parent: -1,
 
            first_unique_id_in_scope: -1,
 
            next_unique_id_in_scope: -1,
 
        };
 
    }
 
}
 

	
 
#[derive(Debug, Clone, PartialEq, Eq)]
 
pub enum VariableKind {
 
    Parameter,      // in parameter list of function/component
 
    Local,          // declared in function/component body
 
    Binding,        // may be bound to in a binding expression (determined in validator/linker)
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct Variable {
 
    pub this: VariableId,
 
    // Parsing
 
    pub kind: VariableKind,
 
    pub parser_type: ParserType,
 
    pub identifier: Identifier,
 
    // Validator/linker
 
    pub relative_pos_in_parent: i32,
 
    pub unique_id_in_scope: i32, // Temporary fix until proper bytecode/asm is generated
 
}
 

	
 
#[derive(Debug, Clone)]
 
#[derive(Debug)]
 
pub enum Definition {
 
    Struct(StructDefinition),
 
    Enum(EnumDefinition),
 
    Union(UnionDefinition),
 
    Procedure(ProcedureDefinition),
 
}
 

	
 
impl Definition {
 
    pub fn is_struct(&self) -> bool {
 
        match self {
 
            Definition::Struct(_) => true,
 
            _ => false
 
        }
 
    }
 
    pub(crate) fn as_struct(&self) -> &StructDefinition {
 
        match self {
 
            Definition::Struct(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'StructDefinition'"),
 
        }
 
    }
 
    pub(crate) fn as_struct_mut(&mut self) -> &mut StructDefinition {
 
        match self {
 
            Definition::Struct(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'StructDefinition'"),
 
        }
 
    }
 
    pub fn is_enum(&self) -> bool {
 
        match self {
 
            Definition::Enum(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub(crate) fn as_enum(&self) -> &EnumDefinition {
 
        match self {
 
            Definition::Enum(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'EnumDefinition'"),
 
        }
 
    }
 
    pub(crate) fn as_enum_mut(&mut self) -> &mut EnumDefinition {
 
        match self {
 
            Definition::Enum(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'EnumDefinition'"),
 
        }
 
    }
 
    pub fn is_union(&self) -> bool {
 
        match self {
 
            Definition::Union(_) => true,
 
            _ => false,
 
@@ -973,188 +973,187 @@ pub struct EnumDefinition {
 
}
 

	
 
impl EnumDefinition {
 
    pub(crate) fn new_empty(
 
        this: EnumDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ this, defined_in, span, identifier, poly_vars, variants: Vec::new() }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct UnionVariantDefinition {
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub value: Vec<ParserType>, // if empty, then union variant does not contain any embedded types
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct UnionDefinition {
 
    pub this: UnionDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub variants: Vec<UnionVariantDefinition>,
 
}
 

	
 
impl UnionDefinition {
 
    pub(crate) fn new_empty(
 
        this: UnionDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ this, defined_in, span, identifier, poly_vars, variants: Vec::new() }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 
pub enum ProcedureKind {
 
    Function, // with return type
 
    Primitive, // without return type
 
    Composite,
 
}
 

	
 
/// Monomorphized instantiation of a procedure (or the sole instantiation of a
 
/// non-polymorphic procedure).
 
#[derive(Debug)]
 
pub struct ProcedureDefinitionMonomorph {
 
    pub argument_types: Vec<TypeId>,
 
    pub expr_info: Vec<ExpressionInfo>
 
}
 

	
 
impl ProcedureDefinitionMonomorph {
 
    pub(crate) fn new_invalid() -> Self {
 
        return Self{
 
            argument_types: Vec::new(),
 
            expr_info: Vec::new(),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, Copy)]
 
pub struct ExpressionInfo {
 
    pub type_id: TypeId,
 
    pub variant: ExpressionInfoVariant,
 
}
 

	
 
impl ExpressionInfo {
 
    pub(crate) fn new_invalid() -> Self {
 
        return Self{
 
            type_id: TypeId::new_invalid(),
 
            variant: ExpressionInfoVariant::Generic,
 
        }
 
    }
 
}
 

	
 
#[derive(Clone, Copy)]
 
#[derive(Debug, Clone, Copy)]
 
pub enum ExpressionInfoVariant {
 
    Generic,
 
    Procedure(TypeId, u32), // procedure TypeID and its monomorph index
 
    Select(i32), // index
 
}
 

	
 
impl ExpressionInfoVariant {
 
    pub(crate) fn as_select(&self) -> i32 {
 
        match self {
 
            ExpressionInfoVariant::Select(v) => *v,
 
            _ => unreachable!(),
 
        }
 
    }
 

	
 
    pub(crate) fn as_procedure(&self) -> (TypeId, u32) {
 
        match self {
 
            ExpressionInfoVariant::Procedure(type_id, monomorph_index) => (*type_id, *monomorph_index),
 
            _ => unreachable!(),
 
        }
 
    }
 
}
 

	
 
/// Generic storage for functions, primitive components and composite
 
/// components.
 
// Note that we will have function definitions for builtin functions as well. In
 
// that case the span, the identifier span and the body are all invalid.
 
#[derive(Debug, Clone)]
 
#[derive(Debug)]
 
pub struct ProcedureDefinition {
 
    pub this: ProcedureDefinitionId,
 
    pub defined_in: RootId,
 
    // Symbol scanning
 
    pub builtin: bool,
 
    pub kind: ProcedureKind,
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Parser
 
    pub return_type: Option<ParserType>, // present on functions, not components
 
    pub parameters: Vec<VariableId>,
 
    pub scope: ScopeId,
 
    pub body: BlockStatementId,
 
    // Monomorphization of typed procedures
 
    pub monomorphs: Vec<ProcedureDefinitionMonomorph>,
 
    // Validation/linking
 
    pub num_expressions_in_body: i32,
 
}
 

	
 
impl ProcedureDefinition {
 
    pub(crate) fn new_empty(
 
        this: ProcedureDefinitionId, defined_in: RootId, span: InputSpan,
 
        kind: ProcedureKind, identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self {
 
            this, defined_in,
 
            builtin: false,
 
            span,
 
            kind, identifier, poly_vars,
 
            return_type: None,
 
            parameters: Vec::new(),
 
            scope: ScopeId::new_invalid(),
 
            body: BlockStatementId::new_invalid(),
 
            monomorphs: Vec::new(),
 
            num_expressions_in_body: -1,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum Statement {
 
    Block(BlockStatement),
 
    EndBlock(EndBlockStatement),
 
    Local(LocalStatement),
 
    Labeled(LabeledStatement),
 
    If(IfStatement),
 
    EndIf(EndIfStatement),
 
    While(WhileStatement),
 
    EndWhile(EndWhileStatement),
 
    Break(BreakStatement),
 
    Continue(ContinueStatement),
 
    Synchronous(SynchronousStatement),
 
    EndSynchronous(EndSynchronousStatement),
 
    Fork(ForkStatement),
 
    EndFork(EndForkStatement),
 
    Select(SelectStatement),
 
    EndSelect(EndSelectStatement),
 
    Return(ReturnStatement),
 
    Goto(GotoStatement),
 
    New(NewStatement),
 
    Expression(ExpressionStatement),
 
}
 

	
 
impl Statement {
 
    pub fn as_new(&self) -> &NewStatement {
 
        match self {
 
            Statement::New(result) => result,
 
            _ => panic!("Unable to cast `Statement` to `NewStatement`"),
 
        }
 
    }
 

	
 
    pub fn span(&self) -> InputSpan {
 
        match self {
 
            Statement::Block(v) => v.span,
 
            Statement::Local(v) => v.span(),
 
            Statement::Labeled(v) => v.label.span,
 
            Statement::If(v) => v.span,
 
            Statement::While(v) => v.span,
 
            Statement::Break(v) => v.span,
 
            Statement::Continue(v) => v.span,
 
            Statement::Synchronous(v) => v.span,
 
            Statement::Fork(v) => v.span,
 
            Statement::Select(v) => v.span,
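The monomorph bookkeeping introduced above boils down to: each procedure carries a Vec of monomorphs, each monomorph carries per-expression ExpressionInfo, and a call expression's info resolves to the callee's TypeId plus the callee's monomorph index. A simplified, self-contained sketch of that lookup (the structs and u64/u32 ids are stand-ins, not the real AST types):

// Stand-ins for ProcedureDefinition / ProcedureDefinitionMonomorph / ExpressionInfo.
struct ExprInfo { type_id: u64, callee_monomorph_index: u32 }
struct ProcMonomorph { expr_info: Vec<ExprInfo> }
struct Procedure { monomorphs: Vec<ProcMonomorph> }

// Mirrors the executor's lookup: from the caller's monomorph and a call
// expression's type_index to the callee's (TypeId, monomorph index).
fn resolve_call(caller: &Procedure, caller_monomorph: usize, type_index: usize) -> (u64, u32) {
    let info = &caller.monomorphs[caller_monomorph].expr_info[type_index];
    (info.type_id, info.callee_monomorph_index)
}

fn main() {
    let caller = Procedure{
        monomorphs: vec![ProcMonomorph{
            expr_info: vec![ExprInfo{ type_id: 42, callee_monomorph_index: 0 }],
        }],
    };
    assert_eq!(resolve_call(&caller, 0, 0), (42, 0));
}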
src/protocol/eval/executor.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::*;
 
use super::store::*;
 
use super::error::*;
 
use crate::protocol::*;
 
use crate::protocol::ast::*;
 
use crate::protocol::type_table::*;
 

	
 
macro_rules! debug_enabled { () => { false }; }
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "exec", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "exec", $format, $($args),*);
 
    };
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) enum ExprInstruction {
 
    EvalExpr(ExpressionId),
 
    PushValToFront,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Frame {
 
    pub(crate) definition: ProcedureDefinitionId,
 
    pub(crate) monomorph_type_id: TypeId,
 
    pub(crate) monomorph_index: usize,
 
    pub(crate) position: StatementId,
 
    pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
    pub(crate) max_stack_size: u32,
 
}
 

	
 
impl Frame {
 
    /// Creates a new execution frame. Does not modify the stack in any way.
 
    pub fn new(heap: &Heap, definition_id: ProcedureDefinitionId, monomorph_type_id: TypeId) -> Self {
 
    pub fn new(heap: &Heap, definition_id: ProcedureDefinitionId, monomorph_type_id: TypeId, monomorph_index: u32) -> Self {
 
        let definition = &heap[definition_id];
 
        let outer_scope_id = definition.scope;
 
        let first_statement_id = definition.body;
 

	
 
        // Another not-so-pretty thing that has to be replaced somewhere in the
 
        // future...
 
        fn determine_max_stack_size(heap: &Heap, scope_id: ScopeId, max_size: &mut u32) {
 
            let scope = &heap[scope_id];
 

	
 
            // Check current block
 
            let cur_size = scope.next_unique_id_in_scope as u32;
 
            if cur_size > *max_size { *max_size = cur_size; }
 

	
 
            // And child blocks
 
            for child_scope in &scope.nested {
 
                determine_max_stack_size(heap, *child_scope, max_size);
 
            }
 
        }
 

	
 
        let mut max_stack_size = 0;
 
        determine_max_stack_size(heap, outer_scope_id, &mut max_stack_size);
 

	
 
        Frame{
 
            definition: definition_id,
 
            monomorph_type_id,
 
            monomorph_index: monomorph_index as usize,
 
            position: first_statement_id.upcast(),
 
            expr_stack: VecDeque::with_capacity(128),
 
            expr_values: VecDeque::with_capacity(128),
 
            max_stack_size,
 
        }
 
    }
 

	
 
    /// Prepares a single expression for execution. This involves walking the
 
    /// expression tree and putting its nodes in the `expr_stack` such that
 
    /// continuously popping from its back will evaluate the expression. The
 
    /// results of each expression will be stored by pushing onto `expr_values`.
 
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear(); // May not be empty if last expression result(s) were discarded
 

	
 
        self.serialize_expression(heap, expr_id);
 
    }
 

	
 
    /// Prepares multiple expressions for execution (i.e. evaluating all
 
    /// function arguments or all elements of an array/union literal). Per
 
    /// expression this works the same as `prepare_single_expression`. However
 
    /// after each expression is evaluated we insert a `PushValToFront`
 
    /// instruction.
 
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear();
 

	
 
        for expr_id in expr_ids {
 
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
            self.serialize_expression(heap, *expr_id);
 
        }
 
    }
 

	
 
    /// Performs depth-first serialization of expression tree. Let's not care
 
    /// about performance for a temporary runtime implementation
 
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
 
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));
 

	
 
        match &heap[id] {
 
            Expression::Assignment(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Binding(expr) => {
 
                self.serialize_expression(heap, expr.bound_to);
 
                self.serialize_expression(heap, expr.bound_from);
 
            },
 
            Expression::Conditional(expr) => {
 
@@ -177,104 +178,105 @@ impl Frame {
 
                    }
 
                }
 
            },
 
            Expression::Cast(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            }
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(_expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
 

	
 
pub type EvalResult = Result<EvalContinuation, EvalError>;
 

	
 
#[derive(Debug)]
 
pub enum EvalContinuation {
 
    // Returned in both sync and non-sync modes
 
    Stepping,
 
    // Returned only in sync mode
 
    BranchInconsistent,
 
    SyncBlockEnd,
 
    NewFork,
 
    BlockFires(PortId),
 
    BlockGet(PortId),
 
    Put(PortId, ValueGroup),
 
    // Returned only in non-sync mode
 
    ComponentTerminated,
 
    SyncBlockStart,
 
    NewComponent(ProcedureDefinitionId, TypeId, ValueGroup),
 
    NewChannel,
 
}
 

	
 
// Note: cloning is fine, I think: if we clone all values and the heap regions, then
 
// we end up with valid "pointers" to heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
    pub fn new(_types: &TypeTable, heap: &Heap, def: ProcedureDefinitionId, type_id: TypeId, args: ValueGroup) -> Self {
 
    pub fn new(types: &TypeTable, heap: &Heap, def: ProcedureDefinitionId, type_id: TypeId, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        // Maybe do typechecking in the future?
 
        let new_frame = Frame::new(heap, def, type_id);
 
        let monomorph_index = types.get_monomorph(type_id).variant.as_procedure().monomorph_index;
 
        let new_frame = Frame::new(heap, def, type_id, monomorph_index);
 
        let max_stack_size = new_frame.max_stack_size;
 
        prompt.frames.push(new_frame);
 
        args.into_store(&mut prompt.store);
 
        prompt.store.reserve_stack(max_stack_size);
 

	
 
        prompt
 
    }
 

	
 
    /// Big ol' function right here. Didn't want to break it up unnecessarily.
 
    /// It consists of, in sequence: executing any expressions that should be
 
    /// executed before the next statement can be evaluated, then a section that
 
    /// performs debug printing, and finally a section that takes the next
 
    /// statement and executes it. If the statement requires any expressions to
 
    /// be evaluated, then they will be added such that the next time `step` is
 
    /// called, all of these expressions are indeed evaluated.
 
    pub(crate) fn step(&mut self, types: &TypeTable, heap: &Heap, modules: &[Module], ctx: &mut impl RunContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the transformation first (because Rust...)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
 

	
 
        // Helper function to make sure that an index into an array is valid.
 
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx >= array_len as i64;
 
        }
 

	
 
        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx > array_len as i64;
 
        }
 

	
 
@@ -722,100 +724,100 @@ impl Prompt {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value_heap_pos = value.as_string();
 
                                    let elements = &self.store.heap_regions[value_heap_pos as usize].values;
 

	
 
                                    let mut message = String::with_capacity(elements.len());
 
                                    for element in elements {
 
                                        message.push(element.as_char());
 
                                    }
 

	
 
                                    // Drop the heap-allocated value from the
 
                                    // store
 
                                    self.store.drop_heap_pos(value_heap_pos);
 
                                    println!("{}", message);
 
                                },
 
                                Method::SelectStart => {
 
                                    todo!("select start");
 
                                },
 
                                Method::SelectRegisterCasePort => {
 
                                    todo!("select register");
 
                                },
 
                                Method::SelectWait => {
 
                                    todo!("select wait");
 
                                },
 
                                Method::UserComponent => {
 
                                    // This is actually handled by the evaluation
 
                                    // of the statement.
 
                                    debug_assert_eq!(heap[expr.procedure].parameters.len(), cur_frame.expr_values.len());
 
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this)
 
                                },
 
                                Method::UserFunction => {
 
                                    // Push a new frame. Note that all expressions have
 
                                    // been pushed to the front, so they're in the order
 
                                    // of the definition.
 
                                    let num_args = expr.arguments.len();
 

	
 
                                    // Determine stack boundaries
 
                                    let cur_stack_boundary = self.store.cur_stack_boundary;
 
                                    let new_stack_boundary = self.store.stack.len();
 

	
 
                                    // Push new boundary and function arguments for new frame
 
                                    self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
 
                                    for _ in 0..num_args {
 
                                        let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
 
                                        self.store.stack.push(argument);
 
                                    }
 

	
 
                                    // Determine the monomorph index of the function we're calling
 
                                    let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
 
                                    let type_id = mono_data.expr_info[expr.type_index as usize].variant.as_procedure().0;
 
                                    let (type_id, monomorph_index) = mono_data.expr_info[expr.type_index as usize].variant.as_procedure();
 

	
 
                                    // Push the new frame and reserve its stack size
 
                                    let new_frame = Frame::new(heap, expr.procedure, type_id);
 
                                    let new_frame = Frame::new(heap, expr.procedure, type_id, monomorph_index);
 
                                    let new_stack_size = new_frame.max_stack_size;
 
                                    self.frames.push(new_frame);
 
                                    self.store.cur_stack_boundary = new_stack_boundary;
 
                                    self.store.reserve_stack(new_stack_size);
 

	
 
                                    // To simplify the logic a little bit we will now
 
                                    // return and ask our caller to call us again
 
                                    return Ok(EvalContinuation::Stepping);
 
                                }
 
                            }
 
                        },
 
                        Expression::Variable(expr) => {
 
                            let variable = &heap[expr.declaration.unwrap()];
 
                            let ref_value = if expr.used_as_binding_target {
 
                                Value::Binding(variable.unique_id_in_scope as StackPos)
 
                            } else {
 
                                Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos))
 
                            };
 
                            cur_frame.expr_values.push_back(ref_value);
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_log!("Frame [{:?}] at {:?}", cur_frame.definition, cur_frame.position);
 
        if debug_enabled!() {
 
            debug_log!("Expression value stack (size = {}):", cur_frame.expr_values.len());
 
            for (_stack_idx, _stack_val) in cur_frame.expr_values.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Stack (size = {}):", self.store.stack.len());
 
            for (_stack_idx, _stack_val) in self.store.stack.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Heap:");
 
            for (_heap_idx, _heap_region) in self.store.heap_regions.iter().enumerate() {
 
                let _is_free = self.store.free_regions.iter().any(|idx| *idx as usize == _heap_idx);
 
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", _heap_idx, !_is_free, _heap_region.values.len(), &_heap_region.values);
 
            }
 
        }
 
        // No (more) expressions to evaluate, so evaluate the statement (which may

        // depend on the result of the last evaluated expression(s))
 
        let stmt = &heap[cur_frame.position];
 
        let return_value = match stmt {
 
            Statement::Block(stmt) => {
 
@@ -984,97 +986,97 @@ impl Prompt {
 
                // we may be returning a reference to something on our stack,
 
                // so we need to read that value and clone it.
 
                let return_value = cur_frame.expr_values.pop_back().unwrap();
 
                let return_value = match return_value {
 
                    Value::Ref(value_id) => self.store.read_copy(value_id),
 
                    _ => return_value,
 
                };
 

	
 
                // Pre-emptively pop our stack frame
 
                self.frames.pop();
 

	
 
                // Clean up our section of the stack
 
                self.store.clear_stack(0);
 
                self.store.stack.truncate(self.store.cur_stack_boundary + 1);
 
                let prev_stack_idx = self.store.stack.pop().unwrap().as_stack_boundary();
 

	
 
                // TODO: Temporary hack for testing, remove at some point
 
                if self.frames.is_empty() {
 
                    debug_assert!(prev_stack_idx == -1);
 
                    debug_assert!(self.store.stack.len() == 0);
 
                    self.store.stack.push(return_value);
 
                    return Ok(EvalContinuation::ComponentTerminated);
 
                }
 

	
 
                debug_assert!(prev_stack_idx >= 0);
 
                // Return to original state of stack frame
 
                self.store.cur_stack_boundary = prev_stack_idx as usize;
 
                let cur_frame = self.frames.last_mut().unwrap();
 
                cur_frame.expr_values.push_back(return_value);
 

	
 
                // We just returned to the previous frame, which might be in
 
                // the middle of evaluating expressions for a particular
 
                // statement. So we don't want to enter the code below.
 
                return Ok(EvalContinuation::Stepping);
 
            },
 
            Statement::Goto(stmt) => {
 
                cur_frame.position = stmt.target.upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::New(stmt) => {
 
                let call_expr = &heap[stmt.expression];
 
                debug_assert_eq!(
 
                    cur_frame.expr_values.len(), heap[call_expr.procedure].parameters.len(),
 
                    "mismatch in expr stack size and number of arguments for new statement"
 
                );
 

	
 
                let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
 
                let type_id = mono_data.expr_info[expr.type_index as usize].variant.as_procedure().0;
 
                let type_id = mono_data.expr_info[call_expr.type_index as usize].variant.as_procedure().0;
 

	
 
                // Note that due to expression value evaluation they exist in
 
                // reverse order on the stack.
 
                // TODO: Revise this code, keep it as is to be compatible with current runtime
 
                let mut args = Vec::new();
 
                while let Some(value) = cur_frame.expr_values.pop_front() {
 
                    args.push(value);
 
                }
 

	
 
                // Construct argument group, thereby copying heap regions
 
                let argument_group = ValueGroup::from_store(&self.store, &args);
 

	
 
                // Clear any heap regions
 
                for arg in &args {
 
                    self.store.drop_value(arg.get_heap_pos());
 
                }
 

	
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::NewComponent(call_expr.procedure, type_id, argument_group))
 
            },
 
            Statement::Expression(stmt) => {
 
                // The expression has just been completely evaluated. Some
 
                // values might have remained on the expression value stack.
 
                // cur_frame.expr_values.clear(); PROPER CLEARING
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
        };
 

	
 
        assert!(
 
            cur_frame.expr_values.is_empty(),
 
            "This is a debugging assertion that will fail if you perform expressions without \
 
            assigning to anything. This should be completely valid, and this assertion should be \
 
            replaced by something that clears the expression values if needed, but I'll keep this \
 
            in for now for debugging purposes."
 
        );
 

	
 
        // If the next statement requires evaluating expressions then we push
 
        // these onto the expression stack. This way we will evaluate this
 
        // stack in the next loop, then evaluate the statement using the result
 
        // from the expression evaluation.
 
        if !cur_frame.position.is_invalid() {
 
            let stmt = &heap[cur_frame.position];
 

	
 
            match stmt {
 
                Statement::Local(stmt) => {
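The UserFunction call path above (push the previous stack boundary, push the arguments, then reserve the callee's stack slots) can be modeled in isolation. The sketch below uses a plain Vec<i64> as a stand-in for the value store; it is only meant to show the bookkeeping, not the real Store API:

struct MiniStore {
    stack: Vec<i64>,
    cur_stack_boundary: usize,
}

fn push_call_frame(store: &mut MiniStore, args: &[i64], callee_stack_size: usize) {
    // The new boundary sits where the marker value is pushed.
    let new_boundary = store.stack.len();

    // Remember the caller's boundary so a return can restore it later.
    store.stack.push(store.cur_stack_boundary as i64);

    // Function arguments become the first stack slots of the callee.
    store.stack.extend_from_slice(args);

    // Switch to the callee's frame and reserve its local-variable slots.
    store.cur_stack_boundary = new_boundary;
    store.stack.resize(store.stack.len() + callee_stack_size, 0);
}

fn main() {
    let mut store = MiniStore{ stack: Vec::new(), cur_stack_boundary: 0 };
    push_call_frame(&mut store, &[1, 2], 3);
    assert_eq!(store.stack.len(), 1 + 2 + 3); // boundary marker + args + locals
}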
src/protocol/mod.rs
 
@@ -63,114 +63,116 @@ impl ProtocolDescription {
 
        let modules: Vec<Module> = parser.modules.into_iter()
 
            .map(|module| Module{
 
                source: module.source,
 
                root_id: module.root_id,
 
                name: module.name.map(|(_, name)| name)
 
            })
 
            .collect();
 

	
 
        return Ok(ProtocolDescription {
 
            modules,
 
            heap: parser.heap,
 
            types: parser.type_table,
 
            pool: Mutex::new(parser.string_pool),
 
        });
 
    }
 

	
 
    pub(crate) fn new_component(
 
        &self, module_name: &[u8], identifier: &[u8], arguments: ValueGroup
 
    ) -> Result<Prompt, ComponentCreationError> {
 
        // Find the module in which the definition can be found
 
        let module_root = self.lookup_module_root(module_name);
 
        if module_root.is_none() {
 
            return Err(ComponentCreationError::ModuleDoesntExist);
 
        }
 
        let module_root = module_root.unwrap();
 

	
 
        let root = &self.heap[module_root];
 
        let definition_id = root.get_definition_ident(&self.heap, identifier);
 
        if definition_id.is_none() {
 
            return Err(ComponentCreationError::DefinitionDoesntExist);
 
        }
 
        let definition_id = definition_id.unwrap();
 

	
 
        let ast_definition = &self.heap[definition_id];
 
        if !ast_definition.is_procedure() {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // Make sure that the types of the provided value group matches that of
 
        // the expected types.
 
        let ast_definition = ast_definition.as_procedure();
 
        if !ast_definition.poly_vars.is_empty() || ast_definition.kind == ProcedureKind::Function {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // - check number of arguments by retrieving the one instantiated
 
        //   monomorph
 
        let concrete_type = ConcreteType{ parts: vec![ConcreteTypePart::Component(ast_definition.this, 0)] };
 
        let mono_index = self.types.get_procedure_monomorph_type_id(&definition_id, &concrete_type.parts).unwrap();
 
        let mono_type = self.types.get_procedure_monomorph(mono_index);
 
        if mono_type.arg_types.len() != arguments.values.len() {
 
        let procedure_type_id = self.types.get_procedure_monomorph_type_id(&definition_id, &concrete_type.parts).unwrap();
 
        let procedure_monomorph_index = self.types.get_monomorph(procedure_type_id).variant.as_procedure().monomorph_index;
 
        let monomorph_info = &ast_definition.monomorphs[procedure_monomorph_index as usize];
 
        if monomorph_info.argument_types.len() != arguments.values.len() {
 
            return Err(ComponentCreationError::InvalidNumArguments);
 
        }
 

	
 
        // - for each argument try to make sure the types match
 
        for arg_idx in 0..arguments.values.len() {
 
            let expected_type = &mono_type.arg_types[arg_idx];
 
            let expected_type_id = monomorph_info.argument_types[arg_idx];
 
            let expected_type = &self.types.get_monomorph(expected_type_id).concrete_type;
 
            let provided_value = &arguments.values[arg_idx];
 
            if !self.verify_same_type(expected_type, 0, &arguments, provided_value) {
 
                return Err(ComponentCreationError::InvalidArgumentType(arg_idx));
 
            }
 
        }
 

	
 
        // By now we're sure that all of the arguments are correct. So create
 
        // the connector.
 
        return Ok(Prompt::new(&self.types, &self.heap, ast_definition.this, mono_index, arguments));
 
        return Ok(Prompt::new(&self.types, &self.heap, ast_definition.this, procedure_type_id, arguments));
 
    }
 

	
 
    fn lookup_module_root(&self, module_name: &[u8]) -> Option<RootId> {
 
        for module in self.modules.iter() {
 
            match &module.name {
 
                Some(name) => if name.as_bytes() == module_name {
 
                    return Some(module.root_id);
 
                },
 
                None => if module_name.is_empty() {
 
                    return Some(module.root_id);
 
                }
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    fn verify_same_type(&self, expected: &ConcreteType, expected_idx: usize, arguments: &ValueGroup, argument: &Value) -> bool {
 
        use ConcreteTypePart as CTP;
 

	
 
        match &expected.parts[expected_idx] {
 
            CTP::Void | CTP::Message | CTP::Slice | CTP::Pointer | CTP::Function(_, _) | CTP::Component(_, _) => unreachable!(),
 
            CTP::Bool => if let Value::Bool(_) = argument { true } else { false },
 
            CTP::UInt8 => if let Value::UInt8(_) = argument { true } else { false },
 
            CTP::UInt16 => if let Value::UInt16(_) = argument { true } else { false },
 
            CTP::UInt32 => if let Value::UInt32(_) = argument { true } else { false },
 
            CTP::UInt64 => if let Value::UInt64(_) = argument { true } else { false },
 
            CTP::SInt8 => if let Value::SInt8(_) = argument { true } else { false },
 
            CTP::SInt16 => if let Value::SInt16(_) = argument { true } else { false },
 
            CTP::SInt32 => if let Value::SInt32(_) = argument { true } else { false },
 
            CTP::SInt64 => if let Value::SInt64(_) = argument { true } else { false },
 
            CTP::Character => if let Value::Char(_) = argument { true } else { false },
 
            CTP::String => {
 
                // Match outer string type and embedded character types
 
                if let Value::String(heap_pos) = argument {
 
                    for element in &arguments.regions[*heap_pos as usize] {
 
                        if let Value::Char(_) = element {} else {
 
                            return false;
 
                        }
 
                    }
 
                } else {
 
                    return false;
 
                }
 

	
 
                return true;
 
            },
 
            CTP::Array => {
 
                if let Value::Array(heap_pos) = argument {
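The reworked check in new_component above first compares the argument count against the procedure monomorph's argument_types and then verifies each provided value against the expected concrete type. A much-simplified standalone sketch of that two-step validation (the error enum and the u64 type ids are stand-ins for the real ComponentCreationError / TypeId machinery, and the per-argument check replaces the recursive verify_same_type):

#[derive(Debug, PartialEq)]
enum CreationError {
    InvalidNumArguments,
    InvalidArgumentType(usize),
}

fn check_arguments(expected_types: &[u64], provided_types: &[u64]) -> Result<(), CreationError> {
    if expected_types.len() != provided_types.len() {
        return Err(CreationError::InvalidNumArguments);
    }
    for (arg_idx, (expected, provided)) in expected_types.iter().zip(provided_types).enumerate() {
        if expected != provided {
            return Err(CreationError::InvalidArgumentType(arg_idx));
        }
    }
    Ok(())
}

fn main() {
    assert_eq!(check_arguments(&[1, 2], &[1, 2]), Ok(()));
    assert_eq!(check_arguments(&[1, 2], &[1, 3]), Err(CreationError::InvalidArgumentType(1)));
}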
src/protocol/parser/mod.rs
 
@@ -253,163 +253,179 @@ impl Parser {
 
    pub fn parse(&mut self) -> Result<(), ParseError> {
 
        let mut pass_ctx = PassCtx{
 
            heap: &mut self.heap,
 
            symbols: &mut self.symbol_table,
 
            pool: &mut self.string_pool,
 
            arch: &self.arch,
 
        };
 

	
 
        // Advance all modules to the phase where all symbols are scanned
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_symbols.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // With all symbols scanned, perform further compilation until we can
 
        // add all base types to the type table.
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_import.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
            self.pass_definitions.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // Add every known type to the type table
 
        self.type_table.build_base_types(&mut self.modules, &mut pass_ctx)?;
 

	
 
        // Continue compilation with the remaining phases now that the types
 
        // are all in the type table
 
        for module_idx in 0..self.modules.len() {
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_validation.visit_module(&mut ctx)?;
 
        }
 

	
 
        // Perform typechecking on all modules
 
        let mut queue = ResolveQueue::new();
 
        for module_idx in 0..self.modules.len() {
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            PassTyping::queue_module_definitions(&mut ctx, &mut queue);
 
            self.pass_typing.queue_module_definitions(&mut ctx, &mut queue);
 
        };
 
        while !queue.is_empty() {
 
            let top = queue.pop().unwrap();
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx: top.root_id.index as usize,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_typing.handle_module_definition(&mut ctx, &mut queue, top)?;
 
        }
 

	
 
        // Rewrite nodes in tree, then prepare for execution of code
 
        for module_idx in 0..self.modules.len() {
 
            self.modules[module_idx].phase = ModuleCompilationPhase::Typed;
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_rewriting.visit_module(&mut ctx)?;
 
            self.pass_stack_size.visit_module(&mut ctx)?;
 
        }
 

	
 
        // Write out desired information
 
        if let Some(filename) = &self.write_ast_to {
 
            let mut writer = ASTWriter::new();
 
            let mut file = std::fs::File::create(std::path::Path::new(filename)).unwrap();
 
            writer.write_ast(&mut file, &self.heap);
 
        }
 

	
 
        Ok(())
 
    }
 
}
 

	
 
fn insert_builtin_type(type_table: &mut TypeTable, parts: Vec<ConcreteTypePart>, has_poly_var: bool, size: usize, alignment: usize) -> TypeId {
 
    const POLY_VARS: [PolymorphicVariable; 1] = [PolymorphicVariable{
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        is_in_use: false,
 
    }];
 

	
 
    let concrete_type = ConcreteType{ parts };
 
    let poly_var = if has_poly_var {
 
        POLY_VARS.as_slice()
 
    } else {
 
        &[]
 
    };
 

	
 
    return type_table.add_builtin_type(concrete_type, poly_var, size, alignment);
 
    return type_table.add_builtin_data_type(concrete_type, poly_var, size, alignment);
 
}
 

	
 
// Note: the argument and return types are produced by a closure because we first need to know the function ID.
 
fn insert_builtin_function<T: Fn(ProcedureDefinitionId) -> (Vec<(&'static str, ParserType)>, ParserType)> (
 
    p: &mut Parser, func_name: &str, polymorphic: &[&str], arg_and_return_fn: T
 
) {
 
    let mut poly_vars = Vec::with_capacity(polymorphic.len());
 
    // Insert into AST (to get an ID), also prepare the polymorphic variables
 
    // we need later for the type table
 
    let mut ast_poly_vars = Vec::with_capacity(polymorphic.len());
 
    let mut type_poly_vars = Vec::with_capacity(polymorphic.len());
 
    for poly_var in polymorphic {
 
        poly_vars.push(Identifier{ span: InputSpan::new(), value: p.string_pool.intern(poly_var.as_bytes()) });
 
        let identifier = Identifier{ span: InputSpan::new(), value: p.string_pool.intern(poly_var.as_bytes()) } ;
 
        ast_poly_vars.push(identifier.clone());
 
        type_poly_vars.push(PolymorphicVariable{ identifier, is_in_use: false });
 
    }
 

	
 
    let func_ident_ref = p.string_pool.intern(func_name.as_bytes());
 
    let procedure_id = p.heap.alloc_procedure_definition(|this| ProcedureDefinition {
 
        this,
 
        defined_in: RootId::new_invalid(),
 
        builtin: true,
 
        kind: ProcedureKind::Function,
 
        span: InputSpan::new(),
 
        identifier: Identifier{ span: InputSpan::new(), value: func_ident_ref.clone() },
 
        poly_vars,
 
        poly_vars: ast_poly_vars,
 
        return_type: None,
 
        parameters: Vec::new(),
 
        scope: ScopeId::new_invalid(),
 
        body: BlockStatementId::new_invalid(),
 
        num_expressions_in_body: -1,
 
        monomorphs: Vec::new(),
 
    });
 

	
 
    // Modify AST with more information about the procedure
 
    let (arguments, return_type) = arg_and_return_fn(procedure_id);
 

	
 
    let mut parameters = Vec::with_capacity(arguments.len());
 
    for (arg_name, arg_type) in arguments {
 
        let identifier = Identifier{ span: InputSpan::new(), value: p.string_pool.intern(arg_name.as_bytes()) };
 
        let param_id = p.heap.alloc_variable(|this| Variable{
 
            this,
 
            kind: VariableKind::Parameter,
 
            parser_type: arg_type.clone(),
 
            identifier,
 
            relative_pos_in_parent: 0,
 
            unique_id_in_scope: 0
 
        });
 
        parameters.push(param_id);
 
    }
 

	
 
    let func = &mut p.heap[procedure_id];
 
    func.parameters = parameters;
 
    func.return_type = Some(return_type);
 

	
 
    // Insert into symbol table
 
    p.symbol_table.insert_symbol(SymbolScope::Global, Symbol{
 
        name: func_ident_ref,
 
        variant: SymbolVariant::Definition(SymbolDefinition{
 
            defined_in_module: RootId::new_invalid(),
 
            defined_in_scope: SymbolScope::Global,
 
            definition_span: InputSpan::new(),
 
            identifier_span: InputSpan::new(),
 
            imported_at: None,
 
            class: DefinitionClass::Function,
 
            definition_id: procedure_id.upcast(),
 
        })
 
    }).unwrap();
 

	
 
    // Insert into type table
 
    let mut concrete_type = ConcreteType::default();
 
    concrete_type.parts.push(ConcreteTypePart::Function(procedure_id, type_poly_vars.len() as u32));
 

	
 
    for _ in 0..type_poly_vars.len() {
 
        concrete_type.parts.push(ConcreteTypePart::Void); // doesn't matter (I hope...)
 
    }
 
    p.type_table.add_builtin_procedure_type(concrete_type, &type_poly_vars);
 
}
 
\ No newline at end of file
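insert_builtin_function above allocates the procedure first (to obtain its ID) and only then asks the caller-supplied closure for the parameter and return types, because those types may need to refer back to the function's own ID. A stripped-down sketch of that allocate-then-fill pattern (MiniHeap and the string-typed signature are illustrative only, not the crate's API):

struct Procedure {
    params: Vec<String>,
    return_type: Option<String>,
}

struct MiniHeap {
    procedures: Vec<Procedure>,
}

impl MiniHeap {
    fn alloc_procedure(&mut self) -> usize {
        self.procedures.push(Procedure{ params: Vec::new(), return_type: None });
        self.procedures.len() - 1
    }
}

fn insert_builtin<F>(heap: &mut MiniHeap, build_signature: F)
where
    F: Fn(usize) -> (Vec<String>, String),
{
    // Allocate first so the signature builder can refer to the procedure's ID.
    let id = heap.alloc_procedure();
    let (params, return_type) = build_signature(id);
    let procedure = &mut heap.procedures[id];
    procedure.params = params;
    procedure.return_type = Some(return_type);
}

fn main() {
    let mut heap = MiniHeap{ procedures: Vec::new() };
    insert_builtin(&mut heap, |_id| (vec!["input".to_string()], "u32".to_string()));
    assert_eq!(heap.procedures.len(), 1);
}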
src/protocol/parser/pass_rewriting.rs
 
@@ -249,125 +249,96 @@ impl Visitor for PassRewriting {
 
        {
 
            let runtime_call_expr_id = create_ast_call_expr(ctx, Method::SelectWait, &mut self.expression_buffer, Vec::new());
 
            let variable_stmt_id = create_ast_variable_declaration_stmt(ctx, select_variable_id, runtime_call_expr_id.upcast());
 
            transformed_stmts.push(variable_stmt_id.upcast().upcast());
 
        }
 

	
 
        call_id_section.forget();
 
        expr_id_section.forget();
 

	
 
        // Now we transform each of the select block case's guard and code into
 
        // a chained if-else statement.
 
        if total_num_cases > 0 {
 
            let (if_stmt_id, end_if_stmt_id) = transform_select_case_code(ctx, id, 0, select_variable_id);
 
            let first_end_if_stmt = &mut ctx.heap[end_if_stmt_id];
 
            first_end_if_stmt.next = outer_end_block_id.upcast();
 

	
 
            let mut last_if_stmt_id = if_stmt_id;
 
            let mut last_end_if_stmt_id = end_if_stmt_id;
 
            transformed_stmts.push(last_if_stmt_id.upcast());
 

	
 
            for case_index in 1..total_num_cases {
 
                let (if_stmt_id, end_if_stmt_id) = transform_select_case_code(ctx, id, case_index, select_variable_id);
 
                let false_case_scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::If(last_if_stmt_id, false)));
 
                set_ast_if_statement_false_body(ctx, last_if_stmt_id, last_end_if_stmt_id, IfStatementCase{ body: if_stmt_id.upcast(), scope: false_case_scope_id });
 

	
 
                let end_if_stmt = &mut ctx.heap[end_if_stmt_id];
 
                end_if_stmt.next = last_end_if_stmt_id.upcast();
 

	
 
                last_if_stmt_id = if_stmt_id;
 
                last_end_if_stmt_id = end_if_stmt_id;
 
            }
 
        }
 

	
 
        // Final steps: set the statements of the replacement block statement,
 
        // and link all of those statements together
 
        let mut last_stmt_id = transformed_stmts[0];
 
        for stmt_id in transformed_stmts.iter().skip(1).copied() {
 
            set_ast_statement_next(ctx, last_stmt_id, stmt_id);
 
            last_stmt_id = stmt_id;
 
        }
 

	
 
        let outer_block_stmt = &mut ctx.heap[outer_block_id];
 
        outer_block_stmt.statements = transformed_stmts;
 

	
 
        return Ok(())
 
    }
 
}
 

	
 
impl PassRewriting {
 
    fn create_runtime_call_statement(&self, ctx: &mut Ctx, method: Method, arguments: Vec<ExpressionId>) -> (CallExpressionId, ExpressionStatementId) {
 
        let call_expr_id = ctx.heap.alloc_call_expression(|this| CallExpression{
 
            this,
 
            func_span: InputSpan::new(),
 
            full_span: InputSpan::new(),
 
            parser_type: ParserType{
 
                elements: Vec::new(),
 
                full_span: InputSpan::new(),
 
            },
 
            method,
 
            arguments,
 
            procedure: ProcedureDefinitionId::new_invalid(),
 
            parent: ExpressionParent::None,
 
        });
 
        let call_stmt_id = ctx.heap.alloc_expression_statement(|this| ExpressionStatement{
 
            this,
 
            span: InputSpan::new(),
 
            expression: call_expr_id.upcast(),
 
            next: StatementId::new_invalid(),
 
        });
 

	
 
        let call_expr = &mut ctx.heap[call_expr_id];
 
        call_expr.parent = ExpressionParent::ExpressionStmt(call_stmt_id);
 

	
 
        return (call_expr_id, call_stmt_id);
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// Utilities to create compiler-generated AST nodes
 
// -----------------------------------------------------------------------------
 

	
 
fn create_ast_variable(ctx: &mut Ctx, scope_id: ScopeId) -> VariableId {
 
    let variable_id = ctx.heap.alloc_variable(|this| Variable{
 
        this,
 
        kind: VariableKind::Local,
 
        parser_type: ParserType{
 
            elements: Vec::new(),
 
            full_span: InputSpan::new(),
 
        },
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        relative_pos_in_parent: -1,
 
        unique_id_in_scope: -1,
 
    });
 
    let scope = &mut ctx.heap[scope_id];
 
    scope.variables.push(variable_id);
 

	
 
    return variable_id;
 
}
 

	
 
fn create_ast_variable_expr(ctx: &mut Ctx, variable_id: VariableId) -> VariableExpressionId {
 
    return ctx.heap.alloc_variable_expression(|this| VariableExpression{
 
        this,
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        declaration: Some(variable_id),
 
        used_as_binding_target: false,
 
        parent: ExpressionParent::None,
 
        type_index: -1
 
    });
 
}
 

	
 
fn create_ast_call_expr(ctx: &mut Ctx, method: Method, buffer: &mut ScopedBuffer<ExpressionId>, arguments: Vec<ExpressionId>) -> CallExpressionId {
 
    let expression_ids = buffer.start_section_initialized(&arguments);
 
    let call_expression_id = ctx.heap.alloc_call_expression(|this| CallExpression{
 
        this,
 
        func_span: InputSpan::new(),
 
        full_span: InputSpan::new(),
 
        parser_type: ParserType{
 
            elements: Vec::new(),
 
            full_span: InputSpan::new(),
 
        },
 
        method,
 
        arguments,
 
        procedure: ProcedureDefinitionId::new_invalid(),
 
        parent: ExpressionParent::None,
 
        type_index: -1,
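The select rewrite above lowers each case to an if statement and hooks every subsequent case into the false branch of the previous one, producing a single else-if chain. A tiny owned-tree sketch of the resulting shape (built back-to-front for brevity; the pass itself links heap IDs forward via set_ast_if_statement_false_body):

struct IfCase {
    case_index: usize,
    else_branch: Option<Box<IfCase>>,
}

fn build_chain(total_num_cases: usize) -> Option<Box<IfCase>> {
    let mut chain = None;
    for case_index in (0..total_num_cases).rev() {
        chain = Some(Box::new(IfCase{ case_index, else_branch: chain }));
    }
    chain
}

fn main() {
    // Three select cases become if / else-if / else-if.
    let chain = build_chain(3).unwrap();
    assert_eq!(chain.case_index, 0);
    assert_eq!(chain.else_branch.as_ref().unwrap().case_index, 1);
}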
src/protocol/parser/pass_typing.rs
 
@@ -981,219 +981,225 @@ struct InferenceRuleSelectTupleMember {
 

	
 
struct InferenceRuleLiteralStruct {
 
    element_indices: Vec<InferNodeIndex>,
 
}
 

	
 
struct InferenceRuleLiteralUnion {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleLiteralArray {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleLiteralTuple {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleCastExpr {
 
    subject_index: InferNodeIndex,
 
}
 

	
 
struct InferenceRuleCallExpr {
 
    argument_indices: Vec<InferNodeIndex>
 
}
 

	
 
/// Data associated with a variable expression: an expression that reads the
 
/// value from a variable.
 
struct InferenceRuleVariableExpr {
 
    var_data_index: VarDataIndex, // shared variable information
 
}
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
 
pub(crate) struct PassTyping {
 
    // Current definition we're typechecking.
 
    reserved_type_id: TypeId,
 
    reserved_monomorph_index: u32,
 
    procedure_id: ProcedureDefinitionId,
 
    procedure_kind: ProcedureKind,
 
    poly_vars: Vec<ConcreteType>,
 
    // Temporary variables during construction of inference rules
 
    parent_index: Option<InferNodeIndex>,
 
    // Buffers for iteration over various types
 
    var_buffer: ScopedBuffer<VariableId>,
 
    expr_buffer: ScopedBuffer<ExpressionId>,
 
    stmt_buffer: ScopedBuffer<StatementId>,
 
    bool_buffer: ScopedBuffer<bool>,
 
    index_buffer: ScopedBuffer<usize>,
 
    definition_buffer: ScopedBuffer<DefinitionId>,
 
    poly_progress_buffer: ScopedBuffer<u32>,
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    infer_nodes: Vec<InferenceNode>,                     // will be transferred to type table at end
 
    poly_data: Vec<PolyData>,       // data for polymorph inference
 
    var_data: Vec<VarData>,
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progression on an associated type
 
    node_queued: DequeSet<InferNodeIndex>,
 
}
 

	
 
/// Generic struct that is used to store inferred types associated with
 
/// polymorphic types.
 
struct PolyData {
 
    first_rule_application: bool,
 
    definition_id: DefinitionId, // the definition, only used for user feedback
 
    /// Inferred types of the polymorphic variables as they are written down
 
    /// at the type's definition.
 
    poly_vars: Vec<InferenceType>,
 
    expr_types: PolyDataTypes,
 
}
 

	
 
// silly structure, just so we can use `PolyDataTypeIndex` ergonomically while
 
// making sure we're still capable of borrowing from `poly_vars`.
 
struct PolyDataTypes {
 
    /// Inferred types of associated types (e.g. struct fields, tuple members,
 
    /// function arguments). These types may depend on the polymorphic variables
 
    /// defined above.
 
    associated: Vec<InferenceType>,
 
    /// Inferred "returned" type (e.g. if a struct field is selected, then this
 
    /// contains the type of the selected field, for a function call it contains
 
    /// the return type). May depend on the polymorphic variables defined above.
 
    returned: InferenceType,
 
}
 

	
 
#[derive(Clone, Copy)]
 
enum PolyDataTypeIndex {
 
    Associated(usize), // indexes into `PolyData.associated`
 
    Returned,
 
}
 

	
 
impl PolyDataTypes {
 
    fn get_type(&self, index: PolyDataTypeIndex) -> &InferenceType {
 
        match index {
 
            PolyDataTypeIndex::Associated(index) => return &self.associated[index],
 
            PolyDataTypeIndex::Returned => return &self.returned,
 
        }
 
    }
 

	
 
    fn get_type_mut(&mut self, index: PolyDataTypeIndex) -> &mut InferenceType {
 
        match index {
 
            PolyDataTypeIndex::Associated(index) => return &mut self.associated[index],
 
            PolyDataTypeIndex::Returned => return &mut self.returned,
 
        }
 
    }
 
}
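// A minimal sketch (hypothetical `SketchPolyData`/`SketchPolyDataTypes`, not
// compiler types) of the field-level split borrow that the nested struct
// enables: because the accessor lives on the inner struct, an inferred type
// can be mutated while `poly_vars` on the outer value stays borrowed.
struct SketchPolyData { poly_vars: Vec<String>, expr_types: SketchPolyDataTypes }
struct SketchPolyDataTypes { associated: Vec<u32> }

impl SketchPolyDataTypes {
    fn get_type_mut(&mut self, index: usize) -> &mut u32 { &mut self.associated[index] }
}

fn sketch_split_borrow(data: &mut SketchPolyData) {
    let poly_vars = &data.poly_vars;            // shared borrow of one field
    let slot = data.expr_types.get_type_mut(0); // mutable borrow of a sibling field: allowed
    *slot += poly_vars.len() as u32;
}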
 

	
 
struct VarData {
 
    var_id: VariableId,
 
    var_type: InferenceType,
 
    used_at: Vec<InferNodeIndex>, // of variable expressions
 
    linked_var: Option<VarDataIndex>,
 
}
 

	
 
impl PassTyping {
 
    pub(crate) fn new() -> Self {
 
        PassTyping {
 
            reserved_type_id: TypeId::new_invalid(),
 
            reserved_monomorph_index: u32::MAX,
 
            procedure_id: ProcedureDefinitionId::new_invalid(),
 
            procedure_kind: ProcedureKind::Function,
 
            poly_vars: Vec::new(),
 
            parent_index: None,
 
            var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            bool_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            index_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            definition_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            poly_progress_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            infer_nodes: Vec::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            poly_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            var_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            node_queued: DequeSet::new(),
 
        }
 
    }
 

	
 
    pub(crate) fn queue_module_definitions(ctx: &mut Ctx, queue: &mut ResolveQueue) {
 
    pub(crate) fn queue_module_definitions(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) {
 
        debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::ValidatedAndLinked);
 
        let root_id = ctx.module().root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 
        let definitions_section = self.definition_buffer.start_section_initialized(&root.definitions);
 

	
 
        for definition_id in definitions_section.iter_copied() {
 
            let definition = &ctx.heap[definition_id];
 

	
 
            let first_concrete_part_and_procedure_id = match definition {
 
                Definition::Procedure(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        if definition.kind == ProcedureKind::Function {
 
                            Some((ConcreteTypePart::Function(definition.this, 0), definition.this))
 
                        } else {
 
                            Some((ConcreteTypePart::Component(definition.this, 0), definition.this))
 
                        }
 
                    } else {
 
                        None
 
                    }
 
                }
 
                Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => None,
 
            };
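            // Note: only non-polymorphic procedures produce `Some(..)` here.
            // Polymorphic procedures are queued lazily, once a call site
            // supplies concrete polymorphic arguments (see the call handling
            // further below).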
 

	
 
            if let Some((first_concrete_part, procedure_id)) = first_concrete_part_and_procedure_id {
 
                let procedure = &mut ctx.heap[procedure_id];
 
                let monomorph_index = procedure.monomorphs.len() as u32;
 
                procedure.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid());
 

	
 
                let concrete_type = ConcreteType{ parts: vec![first_concrete_part] };
 
                let type_id = ctx.types.reserve_procedure_monomorph_type_id(definition_id, concrete_type, monomorph_index);
 
                let type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, concrete_type, monomorph_index);
 
                queue.push(ResolveQueueElement{
 
                    root_id,
 
                    definition_id: *definition_id,
 
                    definition_id,
 
                    reserved_type_id: type_id,
 
                    reserved_monomorph_index: monomorph_index,
 
                })
 
            }
 
        }
 

	
 
        definitions_section.forget();
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        self.reset();
 
        debug_assert_eq!(ctx.module().root_id, element.root_id);
 
        debug_assert!(self.poly_vars.is_empty());
 

	
 
        // Prepare for visiting the definition
 
        self.reserved_type_id = element.reserved_type_id;
 
        self.reserved_monomorph_index = element.reserved_monomorph_index;
 

	
 
        let proc_base = ctx.types.get_base_definition(&element.definition_id).unwrap();
 
        if proc_base.is_polymorph {
 
            let monomorph = ctx.types.get_monomorph(element.reserved_type_id);
 
            for poly_arg in monomorph.concrete_type.embedded_iter(0) {
 
                self.poly_vars.push(ConcreteType{ parts: Vec::from(poly_arg) });
 
            }
 
        }
 

	
 
        // Visit the definition, setting up the type resolving process, then
 
        // (attempt to) resolve all types
 
        self.visit_definition(ctx, element.definition_id)?;
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.reserved_type_id = TypeId::new_invalid();
 
        self.procedure_id = ProcedureDefinitionId::new_invalid();
 
        self.procedure_kind = ProcedureKind::Function;
 
        self.poly_vars.clear();
 
        self.parent_index = None;
 

	
 
        self.infer_nodes.clear();
 
        self.poly_data.clear();
 
        self.var_data.clear();
 
        self.node_queued.clear();
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// PassTyping - Visitor-like implementation
 
// -----------------------------------------------------------------------------
 

	
 
type VisitorResult = Result<(), ParseError>;
 
type VisitExprResult = Result<InferNodeIndex, ParseError>;
 
@@ -1969,182 +1975,186 @@ impl PassTyping {
 
            ctx: &Ctx, expr_id: ExpressionId, inference_poly_args: &Vec<InferenceType>,
 
            first_concrete_part: ConcreteTypePart,
 
        ) -> Result<ConcreteType, ParseError> {
 
            // Prepare storage vector
 
            let mut num_inference_parts = 0;
 
            for inference_type in inference_poly_args {
 
                num_inference_parts += inference_type.parts.len();
 
            }
 

	
 
            let mut concrete_type = ConcreteType{
 
                parts: Vec::with_capacity(1 + num_inference_parts),
 
            };
 
            concrete_type.parts.push(first_concrete_part);
 

	
 
            // Go through all polymorphic arguments and add them to the concrete
 
            // types.
 
            for (poly_idx, poly_type) in inference_poly_args.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    let expr = &ctx.heap[expr_id];
 
                    let definition = match expr {
 
                        Expression::Call(expr) => expr.procedure.upcast(),
 
                        Expression::Literal(expr) => match &expr.value {
 
                            Literal::Enum(lit) => lit.definition,
 
                            Literal::Union(lit) => lit.definition,
 
                            Literal::Struct(lit) => lit.definition,
 
                            _ => unreachable!()
 
                        },
 
                        _ => unreachable!(),
 
                    };
 
                    let poly_vars = ctx.heap[definition].poly_vars();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.operation_span(), format!(
 
                            "could not fully infer the type of polymorphic variable '{}' of this expression (got '{}')",
 
                            poly_vars[poly_idx].value.as_str(), poly_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 

	
 
                poly_type.write_concrete_type(&mut concrete_type);
 
            }
 

	
 
            Ok(concrete_type)
 
        }
 

	
 
        // Every expression has been checked and new monomorphs are queued. Transfer the
 
        // expression information to the AST. If this is the first time we're
 
        // visiting this procedure then we assign expression indices as well.
 
        let procedure = &ctx.heap[self.procedure_id];
 
        let num_infer_nodes = self.infer_nodes().len();
 
        let num_infer_nodes = self.infer_nodes.len();
 
        let mut monomorph = ProcedureDefinitionMonomorph{
 
            argument_types: Vec::with_capacity(procedure.parameters.len()),
 
            expr_info: Vec::with_capacity(num_infer_nodes),
 
        };
 

	
 
        // For all of the expressions look up the TypeId (or create a new one).
 
        // For function calls and component instantiations figure out if they
 
        // need to be typechecked
 
        for infer_node in self.infer_nodes.iter_mut() {
 
            // Determine type ID
 
            let expr = &ctx.heap[infer_node.expr_id];
 
            let poly_data = &self.poly_data[infer_node.poly_data_index as usize];
 

	
 
            // TODO: Maybe optimize? Split insertion up into lookup, then clone
 
            //  if needed?
 
            let mut concrete_type = ConcreteType::default();
 
            infer_node.expr_type.write_concrete_type(&mut concrete_type);
 
            let info_type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete_type)?;
 

	
 
            // Determine procedure type ID, i.e. a called/instantiated
 
            // procedure's signature.
 
            let info_variant = if let Expression::Call(expr) = expr {
 
                // Construct full function type. If not yet typechecked then
 
                // queue it for typechecking.
 
                let poly_data = &self.poly_data[infer_node.poly_data_index as usize];
 
                debug_assert!(expr.method.is_user_defined() || expr.method.is_public_builtin());
 
                let procedure_id = expr.procedure;
 
                let num_poly_vars = poly_data.poly_vars.len() as u32;
 

	
 
                let first_part = match expr.method {
 
                    Method::UserFunction => ConcreteTypePart::Function(expr.procedure, num_poly_vars),
 
                    Method::UserComponent => ConcreteTypePart::Component(expr.procedure, num_poly_vars),
 
                    _ => ConcreteTypePart::Function(expr.procedure, num_poly_vars),
 
                    Method::UserFunction => ConcreteTypePart::Function(procedure_id, num_poly_vars),
 
                    Method::UserComponent => ConcreteTypePart::Component(procedure_id, num_poly_vars),
 
                    _ => ConcreteTypePart::Function(procedure_id, num_poly_vars),
 
                };
 

	
 

	
 
                let definition_id = expr.procedure.upcast();
 
                let definition_id = procedure_id.upcast();
 
                let signature_type = poly_data_type_to_concrete_type(
 
                    ctx, infer_node.expr_id, &poly_data.poly_vars, first_part
 
                )?;
 

	
 
                let (type_id, monomorph_index) = if let Some(type_id) = ctx.types.get_procedure_monomorph_type_id(&definition_id, &signature_type.parts) {
 
                    // Procedure is already typechecked
 
                    let monomorph_index = ctx.types.get_monomorph(type_id).variant.as_procedure().monomorph_index;
 
                    (type_id, monomorph_index)
 
                } else {
 
                    // Procedure is not yet typechecked, reserve a TypeID and a monomorph index
 
                    let procedure_to_check = &mut ctx.heap[expr.procedure];
 
                    let procedure_to_check = &mut ctx.heap[procedure_id];
 
                    let monomorph_index = procedure_to_check.monomorphs.len() as u32;
 
                    procedure_to_check.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid());
 
                    let type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, signature_type, monomorph_index);
 
                    queue.push(ResolveQueueElement{
 
                        root_id: ctx.heap[definition_id].defined_in(),
 
                        definition_id,
 
                        reserved_type_id: type_id,
 
                        reserved_monomorph_index: monomorph_index,
 
                    });
 

	
 
                    (type_id, monomorph_index)
 
                };
 

	
 
                ExpressionInfoVariant::Procedure(type_id, monomorph_index)
 
            } else if let Expression::Select(expr) = expr {
 
            } else if let Expression::Select(_expr) = expr {
 
                ExpressionInfoVariant::Select(infer_node.field_index)
 
            } else {
 
                ExpressionInfoVariant::Generic
 
            };
 

	
 
            infer_node.info_type_id = info_type_id;
 
            infer_node.info_variant = info_variant;
 
        }
 

	
 
        // Write the types of the arguments
 
        let procedure = &ctx.heap[self.procedure_id];
 
        for parameter_id in procedure.parameters.iter().copied() {
 
            let mut concrete = ConcreteType::default();
 
            let var_data = self.var_data.iter().find(|v| v.var_id == parameter_id).unwrap();
 
            var_data.var_type.write_concrete_type(&mut concrete);
 
            let type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete)?;
 
            monomorph.argument_types.push(type_id)
 
        }
 

	
 
        // Determine if we have already assigned type indices to the expressions
 
        // before (the indices that, for a given monomorph, look up the type of
 
        // each expression).
 
        let has_type_indices = self.reserved_monomorph_index == 0;
 
        let has_type_indices = self.reserved_monomorph_index > 0;
 
        if has_type_indices {
 
            // already have indices, so resize and then index into it
 
            monomorph.expr_info.resize_with(num_infer_nodes, ExpressionInfo::new_invalid());
 
            debug_assert!(monomorph.expr_info.is_empty());
 
            monomorph.expr_info.resize(num_infer_nodes, ExpressionInfo::new_invalid());
 
            for infer_node in self.infer_nodes.iter() {
 
                let type_index = ctx.heap[infer_node.expr_id].type_index();
 
                monomorph.expr_info[type_index as usize] = infer_node.as_expression_info();
 
            }
 
        } else {
 
            // no indices yet, need to be assigned in AST
 
            for infer_node in self.infer_nodes.iter() {
 
                let type_index = monomorph.expr_info.len();
 
                monomorph.expr_info.push(infer_node.as_expression_info());
 
                *ctx.heap[infer_node.expr_id].type_index_mut() = type_index as i32;
 
            }
 
        }
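        // In other words: the first monomorph of a procedure assigns each
        // expression its `type_index` slot in the AST, and every later
        // monomorph only fills `expr_info[type_index]` for those same slots.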
 

	
 
        // Push the information into the AST
 
        let procedure = &mut ctx.heap[self.procedure_id];
 
        procedure.monomorphs[self.reserved_monomorph_index as usize] = monomorph;
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_inference_rule(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        use InferenceRule as IR;
 

	
 
        let node = &self.infer_nodes[node_index];
 
        match &node.inference_rule {
 
            IR::Noop =>
 
                unreachable!(),
 
            IR::MonoTemplate(_) =>
 
                self.progress_inference_rule_mono_template(ctx, node_index),
 
            IR::BiEqual(_) =>
 
                self.progress_inference_rule_bi_equal(ctx, node_index),
 
            IR::TriEqualArgs(_) =>
 
                self.progress_inference_rule_tri_equal_args(ctx, node_index),
 
            IR::TriEqualAll(_) =>
 
                self.progress_inference_rule_tri_equal_all(ctx, node_index),
 
            IR::Concatenate(_) =>
 
                self.progress_inference_rule_concatenate(ctx, node_index),
 
            IR::IndexingExpr(_) =>
 
                self.progress_inference_rule_indexing_expr(ctx, node_index),
 
            IR::SlicingExpr(_) =>
 
                self.progress_inference_rule_slicing_expr(ctx, node_index),
 
            IR::SelectStructField(_) =>
 
                self.progress_inference_rule_select_struct_field(ctx, node_index),
 
            IR::SelectTupleMember(_) =>
 
                self.progress_inference_rule_select_tuple_member(ctx, node_index),
 
            IR::LiteralStruct(_) =>
 
                self.progress_inference_rule_literal_struct(ctx, node_index),
 
            IR::LiteralEnum =>
 
@@ -2197,168 +2207,166 @@ impl PassTyping {
 
        let argument_template = rule.argument_template;
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        let self_template_progress = self.progress_template(ctx, node_index, result_template.application, result_template.template)?;
 
        let arg1_template_progress = self.progress_template(ctx, arg1_index, argument_template.application, argument_template.template)?;
 
        let (arg1_progress, arg2_progress) = self.apply_equal2_constraint(ctx, node_index, arg1_index, 0, arg2_index, 0)?;
 

	
 
        if self_template_progress { self.queue_node_parent(node_index); }
 
        if arg1_template_progress || arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_tri_equal_all(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_tri_equal_all();
 

	
 
        let template = rule.template;
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        let template_progress = self.progress_template(ctx, node_index, template.application, template.template)?;
 
        let (node_progress, arg1_progress, arg2_progress) =
 
            self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)?;
 

	
 
        if template_progress || node_progress { self.queue_node_parent(node_index); }
 
        if arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_concatenate(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_concatenate();
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        // Two cases: one of the arguments is a string (then all must be), or
 
        // one of the arguments is an array (and all must be arrays).
 
        let (expr_is_str, expr_is_not_str) = self.type_is_certainly_or_certainly_not_string(node_index);
 
        let (arg1_is_str, arg1_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg1_index);
 
        let (arg2_is_str, arg2_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg2_index);
 

	
 
        let someone_is_str = expr_is_str || arg1_is_str || arg2_is_str;
 
        let someone_is_not_str = expr_is_not_str || arg1_is_not_str || arg2_is_not_str;
 
        println!("DEBUG: Running concat, is_str = {}, is_not_str = {}", someone_is_str, someone_is_not_str);
 
        // Note: this statement is an expression returning the progression bools
 
        let (node_progress, arg1_progress, arg2_progress) = if someone_is_str {
 
            // One of the arguments is a string, then all must be strings
 
            self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)?
 
        } else {
 
            let progress_expr = if someone_is_not_str {
 
                // Output must be a normal array
 
                self.apply_template_constraint(ctx, node_index, &ARRAY_TEMPLATE)?
 
            } else {
 
                // Output may still be anything
 
                self.apply_template_constraint(ctx, node_index, &ARRAYLIKE_TEMPLATE)?
 
            };
 

	
 
            let progress_arg1 = self.apply_template_constraint(ctx, arg1_index, &ARRAYLIKE_TEMPLATE)?;
 
            let progress_arg2 = self.apply_template_constraint(ctx, arg2_index, &ARRAYLIKE_TEMPLATE)?;
 

	
 
            // If they're all arraylike, then we want the subtype to match
 
            let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 1)?;
 

	
 
            (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
        };
 

	
 
        if node_progress { self.queue_node_parent(node_index); }
 
        if arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(())
 
    }
 

	
 
    fn progress_inference_rule_indexing_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_indexing_expr();
 
        let subject_index = rule.subject_index;
 
        let index_index = rule.index_index; // which one?
 

	
 
        // Subject is arraylike, index is integerlike
 
        let subject_template_progress = self.apply_template_constraint(ctx, subject_index, &ARRAYLIKE_TEMPLATE)?;
 
        let index_template_progress = self.apply_template_constraint(ctx, index_index, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // If subject is type `Array<T>`, then expr type is `T`
 
        let (node_progress, subject_progress) =
 
            self.apply_equal2_constraint(ctx, node_index, node_index, 0, subject_index, 1)?;
 

	
 
        if node_progress { self.queue_node_parent(node_index); }
 
        if subject_template_progress || subject_progress { self.queue_node(subject_index); }
 
        if index_template_progress { self.queue_node(index_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_slicing_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_slicing_expr();
 
        let subject_index = rule.subject_index;
 
        let from_index_index = rule.from_index;
 
        let to_index_index = rule.to_index;
 

	
 
        debug_log!("Rule slicing [node: {}, expr: {}]", node_index, node.expr_id.index);
 

	
 
        // Subject is arraylike, indices are integerlike
 
        let subject_template_progress = self.apply_template_constraint(ctx, subject_index, &ARRAYLIKE_TEMPLATE)?;
 
        let from_template_progress = self.apply_template_constraint(ctx, from_index_index, &INTEGERLIKE_TEMPLATE)?;
 
        let to_template_progress = self.apply_template_constraint(ctx, to_index_index, &INTEGERLIKE_TEMPLATE)?;
 
        let (from_index_progress, to_index_progress) =
 
            self.apply_equal2_constraint(ctx, node_index, from_index_index, 0, to_index_index, 0)?;
 

	
 
        // Same as array indexing: result depends on whether subject is string
 
        // or array
 
        let (is_string, is_not_string) = self.type_is_certainly_or_certainly_not_string(node_index);
 
        println!("DEBUG: Running slicing, is_str = {}, is_not_str = {}", is_string, is_not_string);
 
        let (node_progress, subject_progress) = if is_string {
 
            // Certainly a string
 
            (
 
                self.apply_forced_constraint(ctx, node_index, &STRING_TEMPLATE)?,
 
                false
 
            )
 
        } else if is_not_string {
 
            // Certainly not a string, apply template constraint. Then make sure
 
            // that if we have an `Array<T>`, the slice produces `Slice<T>`
 
            let node_template_progress = self.apply_template_constraint(ctx, node_index, &SLICE_TEMPLATE)?;
 
            let (node_progress, subject_progress) =
 
                self.apply_equal2_constraint(ctx, node_index, node_index, 1, subject_index, 1)?;
 

	
 
            (
 
                node_template_progress || node_progress,
 
                subject_progress
 
            )
 
        } else {
 
            // Not sure yet
 
            let node_template_progress = self.apply_template_constraint(ctx, node_index, &ARRAYLIKE_TEMPLATE)?;
 
            let (node_progress, subject_progress) =
 
                self.apply_equal2_constraint(ctx, node_index, node_index, 1, subject_index, 1)?;
 

	
 
            (
 
                node_template_progress || node_progress,
 
                subject_progress
 
            )
 
        };
 

	
 
        if node_progress { self.queue_node_parent(node_index); }
 
        if subject_template_progress || subject_progress { self.queue_node(subject_index); }
 
        if from_template_progress || from_index_progress { self.queue_node(from_index_index); }
 
        if to_template_progress || to_index_progress { self.queue_node(to_index_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_select_struct_field(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_select_struct_field();
 

	
 
        let subject_index = rule.subject_index;
 
        let selected_field = rule.selected_field.clone();
 

	
 
        fn get_definition_id_from_inference_type(inference_type: &InferenceType) -> Result<Option<DefinitionId>, ()> {
 
            for part in inference_type.parts.iter() {
 
                if part.is_marker() { continue; }
 
                if !part.is_concrete() { break; }
 
@@ -2925,97 +2933,96 @@ impl PassTyping {
 

	
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module().source, var_decl_this.identifier.span, format!(
 
                                "conflicting types for this channel, this port has type '{}'",
 
                                var_data_this.var_type.display_name(&ctx.heap)
 
                            )
 
                        ).with_info_at_span(
 
                            &ctx.module().source, var_decl_linked.identifier.span, format!(
 
                                "while this port has type '{}'",
 
                                var_data_linked.var_type.display_name(&ctx.heap)
 
                            )
 
                        ));
 
                    }
 
                }
 
            }
 
        }
 

	
 
        if progress_expr { self.queue_node_parent(node_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_template(&mut self, ctx: &Ctx, node_index: InferNodeIndex, application: InferenceRuleTemplateApplication, template: &[InferenceTypePart]) -> Result<bool, ParseError> {
 
        use InferenceRuleTemplateApplication as TA;
 

	
 
        match application {
 
            TA::None => Ok(false),
 
            TA::Template => self.apply_template_constraint(ctx, node_index, template),
 
            TA::Forced => self.apply_forced_constraint(ctx, node_index, template),
 
        }
 
    }
 

	
 
    fn queue_node_parent(&mut self, node_index: InferNodeIndex) {
 
        let node = &self.infer_nodes[node_index];
 
        if let Some(parent_node_index) = node.parent_index {
 
            self.node_queued.push_back(parent_node_index);
 
        }
 
    }
 

	
 
    #[inline]
 
    fn queue_node(&mut self, node_index: InferNodeIndex) {
 
        self.node_queued.push_back(node_index);
 
    }
 

	
 
    /// Returns whether the type is certainly a string (true, false), certainly
 
    /// not a string (false, true), or still unknown (false, false).
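    /// For illustration (informal, skipping marker parts): a leading `String`
    /// part yields `(true, false)`, any other leading concrete part yields
    /// `(false, true)`, and an unknown leading part generally yields
    /// `(false, false)` (except an array-like whose element is known not to be
    /// a character, which yields `(false, true)`).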
 
    fn type_is_certainly_or_certainly_not_string(&self, node_index: InferNodeIndex) -> (bool, bool) {
 
        let expr_type = &self.infer_nodes[node_index].expr_type;
 
        println!("DEBUG: Running test on {:?}", expr_type.parts);
 
        let mut part_index = 0;
 
        while part_index < expr_type.parts.len() {
 
            let part = &expr_type.parts[part_index];
 

	
 
            if part.is_marker() {
 
                part_index += 1;
 
                continue;
 
            }
 
            if !part.is_concrete() { break; }
 

	
 
            if *part == InferenceTypePart::String {
 
                // First part is a string
 
                return (true, false);
 
            } else {
 
                return (false, true);
 
            }
 
        }
 

	
 
        // If here then first non-marker type is not concrete
 
        if part_index == expr_type.parts.len() {
 
            // nothing known at all
 
            return (false, false);
 
        }
 

	
 
        // Special case: array-like where its argument is not a character
 
        if part_index + 1 < expr_type.parts.len() {
 
            if expr_type.parts[part_index] == InferenceTypePart::ArrayLike && expr_type.parts[part_index + 1] != InferenceTypePart::Character {
 
                return (false, true);
 
            }
 
        }
 

	
 

	
 
        (false, false)
 
    }
 

	
 
    /// Applies a template type constraint: the type associated with the
 
    /// supplied expression will be molded into the provided `template`. The
 
    /// constraint also counts as valid if the template could have been molded
 
    /// into the expression type instead. Hence the template may be fully specified (e.g.
 
    /// a bool) or contain "inference" variables (e.g. an array of T)
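    /// For illustration (informal notation): molding the template `Array<T>`
    /// onto a fully unknown expression type yields `Array<?>` and reports
    /// progress, while molding `bool` onto `Array<?>` is rejected as
    /// incompatible.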
 
    fn apply_template_constraint(
 
        &mut self, ctx: &Ctx, node_index: InferNodeIndex, template: &[InferenceTypePart]
 
    ) -> Result<bool, ParseError> {
 
        let expr_type = &mut self.infer_nodes[node_index].expr_type;
 
        match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0, false) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(
src/protocol/parser/pass_validation_linking.rs
Show inline comments
 
@@ -78,191 +78,184 @@ pub(crate) struct PassValidationLinking {
 
    in_select_guard: SelectStatementId, // for detection/rejection of builtin calls
 
    in_select_arm: u32,
 
    in_test_expr: StatementId, // wrapping if/while stmt id
 
    in_binding_expr: BindingExpressionId, // to resolve variable expressions
 
    in_binding_expr_lhs: bool,
 
    // Traversal state, current scope (which can be used to find the parent
 
    // scope) and the definition variant we are considering.
 
    cur_scope: ScopeId,
 
    proc_id: ProcedureDefinitionId,
 
    proc_kind: ProcedureKind,
 
    // "Trailing" traversal state, set be child/prev stmt/expr used by next one
 
    prev_stmt: StatementId,
 
    expr_parent: ExpressionParent,
 
    // Set by parent to indicate that child expression must be assignable. The
 
    // child will throw an error if it is not assignable. The stored span is
 
    // used for the error's position
 
    must_be_assignable: Option<InputSpan>,
 
    // Keeping track of relative positions and unique IDs.
 
    relative_pos_in_parent: i32, // of statements: to determine when variables are visible
 
    // Control flow statements that require label resolving
 
    control_flow_stmts: Vec<ControlFlowStatement>,
 
    // Various temporary buffers for traversal. Essentially working around
 
    // Rust's borrowing rules, since the borrow checker cannot see that we're
 
    // modifying AST members but not the AST container itself.
 
    variable_buffer: ScopedBuffer<VariableId>,
 
    definition_buffer: ScopedBuffer<DefinitionId>,
 
    statement_buffer: ScopedBuffer<StatementId>,
 
    expression_buffer: ScopedBuffer<ExpressionId>,
 
    scope_buffer: ScopedBuffer<ScopeId>,
 
}
 

	
 
impl PassValidationLinking {
 
    pub(crate) fn new() -> Self {
 
        Self{
 
            in_sync: SynchronousStatementId::new_invalid(),
 
            in_while: WhileStatementId::new_invalid(),
 
            in_select_guard: SelectStatementId::new_invalid(),
 
            in_select_arm: 0,
 
            in_test_expr: StatementId::new_invalid(),
 
            in_binding_expr: BindingExpressionId::new_invalid(),
 
            in_binding_expr_lhs: false,
 
            cur_scope: ScopeId::new_invalid(),
 
            prev_stmt: StatementId::new_invalid(),
 
            expr_parent: ExpressionParent::None,
 
            proc_id: ProcedureDefinitionId::new_invalid(),
 
            proc_kind: ProcedureKind::Function,
 
            must_be_assignable: None,
 
            relative_pos_in_parent: 0,
 
            next_expr_index: 0,
 
            control_flow_stmts: Vec::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            variable_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            definition_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            statement_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            expression_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            scope_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
        }
 
    }
 

	
 
    fn reset_state(&mut self) {
 
        self.in_sync = SynchronousStatementId::new_invalid();
 
        self.in_while = WhileStatementId::new_invalid();
 
        self.in_select_guard = SelectStatementId::new_invalid();
 
        self.in_test_expr = StatementId::new_invalid();
 
        self.in_binding_expr = BindingExpressionId::new_invalid();
 
        self.in_binding_expr_lhs = false;
 
        self.cur_scope = ScopeId::new_invalid();
 
        self.proc_id = ProcedureDefinitionId::new_invalid();
 
        self.proc_kind = ProcedureKind::Function;
 
        self.prev_stmt = StatementId::new_invalid();
 
        self.expr_parent = ExpressionParent::None;
 
        self.must_be_assignable = None;
 
        self.relative_pos_in_parent = 0;
 
        self.next_expr_index = 0;
 
        self.control_flow_stmts.clear();
 
    }
 
}
 

	
 
macro_rules! assign_then_erase_next_stmt {
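    // If a previous statement is pending, link its `next` pointer to
    // `$stmt_id`, then clear the pending statement so it is not linked again.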
 
    ($self:ident, $ctx:ident, $stmt_id:expr) => {
 
        if !$self.prev_stmt.is_invalid() {
 
            $ctx.heap[$self.prev_stmt].link_next($stmt_id);
 
            $self.prev_stmt = StatementId::new_invalid();
 
        }
 
    }
 
}
 

	
 
macro_rules! assign_and_replace_next_stmt {
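    // If a previous statement is pending, link its `next` pointer to
    // `$stmt_id`; `$stmt_id` itself then becomes the pending statement for the
    // next link.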
 
    ($self:ident, $ctx:ident, $stmt_id:expr) => {
 
        if !$self.prev_stmt.is_invalid() {
 
            $ctx.heap[$self.prev_stmt].link_next($stmt_id);
 
        }
 
        $self.prev_stmt = $stmt_id;
 
    }
 
}
 

	
 
impl Visitor for PassValidationLinking {
 
    fn visit_module(&mut self, ctx: &mut Ctx) -> VisitorResult {
 
        debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::TypesAddedToTable);
 

	
 
        let root = &ctx.heap[ctx.module().root_id];
 
        let section = self.definition_buffer.start_section_initialized(&root.definitions);
 
        for definition_id in section.iter_copied() {
 
            self.visit_definition(ctx, definition_id)?;
 
        }
 
        section.forget();
 

	
 
        ctx.module_mut().phase = ModuleCompilationPhase::ValidatedAndLinked;
 
        Ok(())
 
    }
 
    //--------------------------------------------------------------------------
 
    // Definition visitors
 
    //--------------------------------------------------------------------------
 

	
 
    fn visit_procedure_definition(&mut self, ctx: &mut Ctx, id: ProcedureDefinitionId) -> VisitorResult {
 
        self.reset_state();
 

	
 
        let definition = &ctx.heap[id];
 
        self.proc_id = id;
 
        self.proc_kind = definition.kind;
 
        self.expr_parent = ExpressionParent::None;
 

	
 
        // Visit parameters
 
        let scope_id = definition.scope;
 
        let old_scope = self.push_scope(ctx, true, scope_id);
 

	
 
        let definition = &ctx.heap[id];
 
        let body_id = definition.body;
 
        let section = self.variable_buffer.start_section_initialized(&definition.parameters);
 
        for variable_idx in 0..section.len() {
 
            let variable_id = section[variable_idx];
 
            self.checked_at_single_scope_add_local(ctx, self.cur_scope, -1, variable_id)?;
 
        }
 
        section.forget();
 

	
 
        // Visit statements in function body
 
        self.visit_block_stmt(ctx, body_id)?;
 
        self.pop_scope(old_scope);
 

	
 
        // Assign total number of expressions and assign an in-block unique ID
 
        // to each of the locals in the procedure.
 
        let definition = &mut ctx.heap[id];
 
        definition.num_expressions_in_body = self.next_expr_index;
 

	
 
        self.resolve_pending_control_flow_targets(ctx)?;
 

	
 
        Ok(())
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Statement visitors
 
    //--------------------------------------------------------------------------
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Get end of block
 
        let block_stmt = &ctx.heap[id];
 
        let end_block_id = block_stmt.end_block;
 
        let scope_id = block_stmt.scope;
 

	
 
        // Traverse statements in block
 
        let statement_section = self.statement_buffer.start_section_initialized(&block_stmt.statements);
 
        let old_scope = self.push_scope(ctx, false, scope_id);
 
        assign_and_replace_next_stmt!(self, ctx, id.upcast());
 

	
 
        for stmt_idx in 0..statement_section.len() {
 
            self.relative_pos_in_parent = stmt_idx as i32;
 
            self.visit_stmt(ctx, statement_section[stmt_idx])?;
 
        }
 

	
 
        statement_section.forget();
 
        assign_and_replace_next_stmt!(self, ctx, end_block_id.upcast());
 

	
 
        self.pop_scope(old_scope);
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let stmt = &ctx.heap[id];
 
        let expr_id = stmt.initial_expr;
 
        let variable_id = stmt.variable;
 

	
 
        self.checked_add_local(ctx, self.cur_scope, self.relative_pos_in_parent, variable_id)?;
 

	
 
        assign_and_replace_next_stmt!(self, ctx, id.upcast().upcast());
 
        debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
        self.expr_parent = ExpressionParent::Memory(id);
 
        self.visit_assignment_expr(ctx, expr_id)?;
 
        self.expr_parent = ExpressionParent::None;
 

	
 
        Ok(())
 
    }
 

	
 
@@ -579,97 +572,96 @@ impl Visitor for PassValidationLinking {
 
        self.expr_parent = ExpressionParent::New(id);
 
        self.visit_call_expr(ctx, call_expr_id)?;
 
        self.expr_parent = ExpressionParent::None;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_id = ctx.heap[id].expression;
 

	
 
        assign_and_replace_next_stmt!(self, ctx, id.upcast());
 
        debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
        self.expr_parent = ExpressionParent::ExpressionStmt(id);
 
        self.visit_expr(ctx, expr_id)?;
 
        self.expr_parent = ExpressionParent::None;
 

	
 
        Ok(())
 
    }
 

	
 

	
 
    //--------------------------------------------------------------------------
 
    // Expression visitors
 
    //--------------------------------------------------------------------------
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 

	
 
        let assignment_expr = &mut ctx.heap[id];
 

	
 
        // Although we call assignment an expression to simplify the compiler's
 
        // code (mainly typechecking), we disallow nested use in expressions
 
        match self.expr_parent {
 
            // Look at us: lying through our teeth while providing error messages.
 
            ExpressionParent::Memory(_) => {},
 
            ExpressionParent::ExpressionStmt(_) => {},
 
            _ => {
 
                let assignment_span = assignment_expr.full_span;
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, assignment_span,
 
                    "assignments are statements, and cannot be used in expressions"
 
                ))
 
            },
 
        }
 

	
 
        let left_expr_id = assignment_expr.left;
 
        let right_expr_id = assignment_expr.right;
 
        let old_expr_parent = self.expr_parent;
 
        assignment_expr.parent = old_expr_parent;
 
        self.next_expr_index += 1;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.must_be_assignable = Some(assignment_expr.operator_span);
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.must_be_assignable = None;
 
        self.visit_expr(ctx, right_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 
        Ok(())
 
    }
 

	
 
    fn visit_binding_expr(&mut self, ctx: &mut Ctx, id: BindingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 

	
 
        // Check for valid context of binding expression
 
        if let Some(span) = self.must_be_assignable {
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, span, "cannot assign to the result from a binding expression"
 
            ));
 
        }
 

	
 
        if self.in_test_expr.is_invalid() {
 
            let binding_expr = &ctx.heap[id];
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, binding_expr.full_span,
 
                "binding expressions can only be used inside the testing expression of 'if' and 'while' statements"
 
            ));
 
        }
 

	
 
        if !self.in_binding_expr.is_invalid() {
 
            let binding_expr = &ctx.heap[id];
 
            let previous_expr = &ctx.heap[self.in_binding_expr];
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, binding_expr.full_span,
 
                "nested binding expressions are not allowed"
 
            ).with_info_str_at_span(
 
                &ctx.module().source, previous_expr.operator_span,
 
                "the outer binding expression is found here"
 
            ));
 
        }
 

	
 
        let mut seeking_parent = self.expr_parent;
 
        loop {
 
            // Perform upward search to make sure only LogicalAnd is applied to
 
            // the binding expression
 
            let valid = match seeking_parent {
 
                ExpressionParent::If(_) | ExpressionParent::While(_) => {
 
                    // Every parent expression (if any) was a LogicalAnd.
src/protocol/parser/type_table.rs
Show inline comments
 
@@ -248,96 +248,97 @@ pub struct StructMonomorph {
 
    pub fields: Vec<StructMonomorphField>,
 
}
 

	
 
pub struct StructMonomorphField {
 
    pub type_id: TypeId,
 
    concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// Union monomorph
 
pub struct UnionMonomorph {
 
    pub variants: Vec<UnionMonomorphVariant>,
 
    pub tag_size: usize, // copied from `UnionType` upon monomorph construction.
 
    // note that the stack size is in the `TypeMonomorph` struct. This size and
 
    // alignment will include the size of the union tag.
 
    //
 
    // heap_size contains the allocated size of the union in the case it
 
    // is used to break a type loop. If it is 0, then it doesn't require
 
    // allocation and lives entirely on the stack.
 
    pub heap_size: usize,
 
    pub heap_alignment: usize,
 
}
 

	
 
pub struct UnionMonomorphVariant {
 
    pub lives_on_heap: bool,
 
    pub embedded: Vec<UnionMonomorphEmbedded>,
 
}
 

	
 
pub struct UnionMonomorphEmbedded {
 
    pub type_id: TypeId,
 
    concrete_type: ConcreteType,
 
    // Note that the meaning of the offset (and alignment) depends on whether or
 
    // not the variant lives on the stack/heap. If it lives on the stack then
 
    // they refer to the offset from the start of the union value (so the first
 
    // embedded type lives at a non-zero offset, because the union tag sits in
 
    // the front). If it lives on the heap then it refers to the offset from the
 
    // allocated memory region (so the first embedded type lives at a 0 offset).
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// Procedure (functions and components of all possible types) monomorph. Also
 
/// stores the expression type data from the typechecking/inferencing pass.
 
pub struct ProcedureMonomorph {
 
    pub monomorph_index: u32,
 
    pub builtin: bool,
 
}
 

	
 
/// Tuple monomorph. Again a kind of exception because one cannot define a named
 
/// tuple type containing explicit polymorphic variables. But again: we need to
 
/// store size/offset/alignment information, so we do it here.
 
pub struct TupleMonomorph {
 
    pub members: Vec<TupleMonomorphMember>
 
}
 

	
 
pub struct TupleMonomorphMember {
 
    pub type_id: TypeId,
 
    concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// Generic unique type ID. Every monomorphed type and every non-polymorphic
 
/// type will have one of these associated with it.
 
#[derive(Debug, Clone, Copy, PartialEq)]
 
pub struct TypeId(i64);
 

	
 
impl TypeId {
 
    pub(crate) fn new_invalid() -> Self {
 
        return Self(-1);
 
    }
 
}
 

	
 
/// A monomorphed type (or non-polymorphic type's) memory layout and information
 
/// regarding associated types (like a struct's field type).
 
pub struct MonoType {
 
    pub type_id: TypeId,
 
    pub concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub(crate) variant: MonoTypeVariant
 
}
 

	
 
impl MonoType {
 
    #[inline]
 
    fn new_empty(type_id: TypeId, concrete_type: ConcreteType, variant: MonoTypeVariant) -> Self {
 
        return Self {
 
            type_id, concrete_type,
 
            size: 0,
 
            alignment: 0,
 
            variant,
 
        }
 
    }
 
@@ -477,96 +478,98 @@ impl PartialEq for MonoSearchKey {
 

	
 

	
 
            if self_in_use == other_in_use {
 
                if self_in_use {
 
                    // Both are in use, so both parts should be equal
 
                    let delta_self = self_end_index - self_index;
 
                    let delta_other = other_end_index - other_index;
 
                    if delta_self != delta_other {
 
                        // Both in use, but not of equal length, so the types
 
                        // cannot match
 
                        return false;
 
                    }
 

	
 
                    for _ in 0..delta_self {
 
                        let (_, self_part) = self.parts[self_index];
 
                        let (_, other_part) = other.parts[other_index];
 

	
 
                        if self_part != other_part {
 
                            return false;
 
                        }
 

	
 
                        self_index += 1;
 
                        other_index += 1;
 
                    }
 
                } else {
 
                    // Both not in use, so skip associated parts
 
                    self_index = self_end_index;
 
                    other_index = other_end_index;
 
                }
 
            } else {
 
                // No agreement on importance of parts. This is practically
 
                // impossible
 
                unreachable!();
 
            }
 
        }
 

	
 
        // Everything matched, so if we're at the end of both arrays then we're
 
        // certain that the two keys are equal.
 
        return self_index == self.parts.len() && other_index == other.parts.len();
 
    }
 
}
 

	
 
impl Eq for MonoSearchKey{}
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
const POLY_VARS_IN_USE: [PolymorphicVariable; 1] = [PolymorphicVariable{ identifier: Identifier::new_empty(InputSpan::new()), is_in_use: true }];
 

	
 
// Programmer note: keep this struct free of dynamically allocated memory
 
#[derive(Clone)]
 
struct TypeLoopBreadcrumb {
 
    type_id: TypeId,
 
    next_member: u32,
 
    next_embedded: u32, // for unions, the index into the variant's embedded types
 
}
 

	
 
// Programmer note: keep this struct free of dynamically allocated memory
 
#[derive(Clone)]
 
struct MemoryBreadcrumb {
 
    type_id: TypeId,
 
    next_member: u32,
 
    next_embedded: u32,
 
    first_size_alignment_idx: u32,
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum TypeLoopResult {
 
    TypeExists,
 
    PushBreadcrumb(DefinitionId, ConcreteType),
 
    TypeLoop(usize), // index into vec of breadcrumbs at which the type matched
 
}
 

	
 
enum MemoryLayoutResult {
 
    TypeExists(usize, usize), // (size, alignment)
 
    PushBreadcrumb(MemoryBreadcrumb),
 
}
 

	
 
// TODO: @Optimize, initial memory-unoptimized implementation
 
struct TypeLoopEntry {
 
    type_id: TypeId,
 
    is_union: bool,
 
}
 

	
 
struct TypeLoop {
 
    members: Vec<TypeLoopEntry>,
 
}
 

	
 
type DefinitionMap = HashMap<DefinitionId, DefinedType>;
 
type MonoTypeMap = HashMap<MonoSearchKey, TypeId>;
 
type MonoTypeArray = Vec<MonoType>;
 

	
 
pub struct TypeTable {
 
    // Lookup from AST DefinitionId to a defined type. Also lookups for
 
    // concrete type to monomorphs
 
    pub(crate) definition_lookup: DefinitionMap,
 
    mono_type_lookup: MonoTypeMap,
 
@@ -612,198 +615,209 @@ impl TypeTable {
 
        debug_assert!(self.definition_lookup.is_empty());
 

	
 
        dbg_code!({
 
            for (index, module) in modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        });
 

	
 
        // Use context to guess hashmap size of the base types
 
        let reserve_size = ctx.heap.definitions.len();
 
        self.definition_lookup.reserve(reserve_size);
 

	
 
        // Resolve all base types
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let definition = &ctx.heap[definition_id];
 

	
 
            match definition {
 
                Definition::Enum(_) => self.build_base_enum_definition(modules, ctx, definition_id)?,
 
                Definition::Union(_) => self.build_base_union_definition(modules, ctx, definition_id)?,
 
                Definition::Struct(_) => self.build_base_struct_definition(modules, ctx, definition_id)?,
 
                Definition::Procedure(_) => self.build_base_procedure_definition(modules, ctx, definition_id)?,
 
            }
 
        }
 

	
 
        debug_assert_eq!(self.definition_lookup.len(), reserve_size, "mismatch in reserved size of type table");
 
        for module in modules.iter_mut() {
 
            module.phase = ModuleCompilationPhase::TypesAddedToTable;
 
        }
 

	
 
        // Go through all types again, lay out all types that are not
 
        // polymorphic. This might cause us to lay out monomorphized polymorphs
 
        // if these were member types of non-polymorphic types.
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let poly_type = self.definition_lookup.get(&definition_id).unwrap();
 

	
 
            if !poly_type.definition.is_data_type() || !poly_type.poly_vars.is_empty() {
 
                continue;
 
            }
 

	
 
            // If here then the type is a data type without polymorphic
 
            // variables, but we might have instantiated it already, so:
 
            let concrete_parts = [ConcreteTypePart::Instance(definition_id, 0)];
 
            self.mono_search_key.set(&concrete_parts, &[]);
 
            let type_id = self.mono_type_lookup.get(&self.mono_search_key);
 
            if type_id.is_none() {
 
                self.detect_and_resolve_type_loops_for(
 
                    modules, ctx.heap,
 
                    modules, ctx.heap, ctx.arch,
 
                    ConcreteType{
 
                        parts: vec![ConcreteTypePart::Instance(definition_id, 0)]
 
                    },
 
                )?;
 
                self.lay_out_memory_for_encountered_types(ctx.arch);
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    /// Retrieves base definition from type table. We must be able to retrieve
 
    /// it as we resolve all base types upon type table construction (for now).
 
    /// However, in the future we might do on-demand type resolving, so return
 
    /// an option anyway
 
    #[inline]
 
    pub(crate) fn get_base_definition(&self, definition_id: &DefinitionId) -> Option<&DefinedType> {
 
        self.definition_lookup.get(&definition_id)
 
    }
 

	
 
    /// Returns the index into the monomorph type array if the procedure type
 
    /// already has a (reserved) monomorph.
 
    #[inline]
 
    pub(crate) fn get_procedure_monomorph_type_id(&self, definition_id: &DefinitionId, type_parts: &[ConcreteTypePart]) -> Option<TypeId> {
 
        // Cannot use internal search key due to mutability issues. But this
 
        // method should end up being deprecated at some point anyway.
 
        debug_assert_eq!(get_concrete_type_definition(type_parts).unwrap(), *definition_id);
 
        let base_type = self.definition_lookup.get(definition_id).unwrap();
 
        let mut search_key = MonoSearchKey::with_capacity(type_parts.len());
 
        search_key.set(type_parts, &base_type.poly_vars);
 

	
 
        return self.mono_type_lookup.get(&search_key).copied();
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn get_monomorph(&self, type_id: TypeId) -> &MonoType {
 
        return &self.mono_types[type_id.0 as usize];
 
    }
 

	
 
    /// Reserves space for a monomorph of a polymorphic procedure. The index
 
    /// will point into a (reserved) slot of the array of expression types. The
 
    /// monomorph may NOT exist yet (because the reservation implies that we're
 
    /// going to be performing typechecking on it, and we don't want to
 
    /// check the same monomorph twice)
 
    pub(crate) fn reserve_procedure_monomorph_type_id(&mut self, definition_id: &DefinitionId, concrete_type: ConcreteType, monomorph_index: u32) -> TypeId {
 
        debug_assert_eq!(get_concrete_type_definition(&concrete_type.parts).unwrap(), *definition_id);
 
        let type_id = TypeId(self.mono_types.len() as i64);
 
        let base_type = self.definition_lookup.get_mut(definition_id).unwrap();
 
        self.mono_search_key.set(&concrete_type.parts, &base_type.poly_vars);
 

	
 
        debug_assert!(!self.mono_type_lookup.contains_key(&self.mono_search_key));
 
        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
        self.mono_types.push(MonoType::new_empty(type_id, concrete_type, MonoTypeVariant::Procedure(ProcedureMonomorph{
 
            monomorph_index,
 
            builtin: false,
 
        })));
 

	
 
        return type_id;
 
    }
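    // A minimal sketch (hypothetical `SketchTable`, not the real type table) of
    // the reserve-then-fill pattern used by the reservation above: a slot and
    // its index are handed out before typechecking, and the finished monomorph
    // is written into that slot afterwards.
    //
    //     struct SketchTable { monomorphs: Vec<Option<String>> }
    //
    //     impl SketchTable {
    //         fn reserve(&mut self) -> usize {
    //             self.monomorphs.push(None); // placeholder, filled in later
    //             self.monomorphs.len() - 1
    //         }
    //         fn fill(&mut self, index: usize, monomorph: String) {
    //             self.monomorphs[index] = Some(monomorph);
    //         }
    //     }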
 

	
 
    /// Adds a builtin type to the type table. As this is only called by the
 
    /// compiler during setup we assume it cannot fail.
 
    pub(crate) fn add_builtin_type(&mut self, concrete_type: ConcreteType, poly_vars: &[PolymorphicVariable], size: usize, alignment: usize) -> TypeId {
 
    pub(crate) fn add_builtin_data_type(&mut self, concrete_type: ConcreteType, poly_vars: &[PolymorphicVariable], size: usize, alignment: usize) -> TypeId {
 
        self.mono_search_key.set(&concrete_type.parts, poly_vars);
 
        debug_assert!(!self.mono_type_lookup.contains_key(&self.mono_search_key));
 
        debug_assert_ne!(alignment, 0);
 
        let type_id = TypeId(self.mono_types.len() as i64);
 
        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
        self.mono_types.push(MonoType{
 
            type_id,
 
            concrete_type,
 
            size,
 
            alignment,
 
            variant: MonoTypeVariant::Builtin,
 
        });
 

	
 
        return type_id;
 
    }
 

	
 
    /// Adds a builtin procedure to the type table.
 
    pub(crate) fn add_builtin_procedure_type(&mut self, concrete_type: ConcreteType, poly_vars: &[PolymorphicVariable]) -> TypeId {
 
        self.mono_search_key.set(&concrete_type.parts, poly_vars);
 
        debug_assert!(!self.mono_type_lookup.contains_key(&self.mono_search_key));
 
        let type_id = TypeId(self.mono_types.len() as i64);
 
        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
        self.mono_types.push(MonoType{
 
            type_id,
 
            concrete_type,
 
            size: 0,
 
            alignment: 0,
 
            variant: MonoTypeVariant::Procedure(ProcedureMonomorph{
 
                monomorph_index: u32::MAX,
 
                builtin: true,
 
            })
 
        });
 

	
 
        return type_id;
 
    }
 

	
 
    /// Adds a monomorphed type to the type table. If it already exists then the
 
    /// previous entry will be used.
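    /// (For instance, adding the monomorph for an array of `u32` twice yields
    /// the same `TypeId` on both calls.)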
 
    pub(crate) fn add_monomorphed_type(
 
        &mut self, modules: &[Module], heap: &Heap, arch: &TargetArch, concrete_type: ConcreteType
 
    ) -> Result<TypeId, ParseError> {
 
        let poly_vars = match get_concrete_type_definition(&concrete_type.parts) {
 
            Some(definition_id) => {
 
                let definition = self.definition_lookup.get(&definition_id).unwrap();
 
                definition.poly_vars.as_slice()
 
            },
 
            None => {
 
                &[]
 
            }
 
        };
 

	
 
        // Check if the concrete type was already added
 
        self.mono_search_key.set(&concrete_type.parts, poly_vars);
 
        Self::set_search_key_to_type(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
        if let Some(type_id) = self.mono_type_lookup.get(&self.mono_search_key) {
 
            return Ok(*type_id);
 
        }
 

	
 
        // Concrete type needs to be added
 
        self.detect_and_resolve_type_loops_for(modules, heap, concrete_type)?;
 
        self.detect_and_resolve_type_loops_for(modules, heap, arch, concrete_type)?;
 
        let type_id = self.encountered_types[0].type_id;
 
        self.lay_out_memory_for_encountered_types(arch);
 

	
 
        return Ok(type_id);
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Building base types
 
    //--------------------------------------------------------------------------
 

	
 
    /// Builds the base type for an enum. Will not compute byte sizes
 
    fn build_base_enum_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.definition_lookup.contains_key(&definition_id), "base enum already built");
 
        let definition = ctx.heap[definition_id].as_enum();
 
        let root_id = definition.defined_in;
 

	
 
        // Determine enum variants
 
        let mut enum_value = -1;
 
        let mut variants = Vec::with_capacity(definition.variants.len());
 

	
 
        for variant in &definition.variants {
 
            if enum_value == i64::MAX {
 
                let source = &modules[definition.defined_in.index as usize].source;
 
                return Err(ParseError::new_error_str_at_span(
 
                    source, variant.identifier.span,
 
                    "this enum variant has an integer value that is too large"
 
                ));
 
            }
 

	
 
            enum_value += 1;
 
            if let EnumVariantValue::Integer(explicit_value) = variant.value {
 
                enum_value = explicit_value;
 
            }
 

	
 
            variants.push(EnumVariant{
 
                identifier: variant.identifier.clone(),
 
                value: enum_value,
 
            });
 
        }
 

	
 
        // Determine tag size
 
        let mut min_enum_value = 0;
 
        let mut max_enum_value = 0;
 
        if !variants.is_empty() {
 
            min_enum_value = variants[0].value;
 
            max_enum_value = variants[0].value;
 
            for variant in variants.iter().skip(1) {
 
                min_enum_value = min_enum_value.min(variant.value);
 
@@ -1079,132 +1093,133 @@ impl TypeTable {
 
    /// arguments all have unique names, and the arguments do not conflict with
 
    /// any symbols defined at the module scope.
 
    fn check_poly_args_collision(
 
        modules: &[Module], ctx: &PassCtx, root_id: RootId, poly_args: &[Identifier]
 
    ) -> Result<(), ParseError> {
 
        // Make sure polymorphic arguments are unique and none of the
 
        // identifiers conflict with any imported scopes
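        //
        // (Illustrative example, not taken from the test suite: declaring the
        // same polymorphic name twice, as in something like `struct Pair<T, T>`,
        // trips the duplicate check below, while naming a polymorphic argument
        // after an imported symbol trips the symbol-conflict check further down.)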
 
        for (arg_idx, poly_arg) in poly_args.iter().enumerate() {
 
            for other_poly_arg in &poly_args[..arg_idx] {
 
                if poly_arg == other_poly_arg {
 
                    let module_source = &modules[root_id.index as usize].source;
 
                    return Err(ParseError::new_error_str_at_span(
 
                        module_source, poly_arg.span,
 
                        "This polymorphic argument is defined more than once"
 
                    ).with_info_str_at_span(
 
                        module_source, other_poly_arg.span,
 
                        "It conflicts with this polymorphic argument"
 
                    ));
 
                }
 
            }
 

	
 
            // Check if identifier conflicts with a symbol defined or imported
 
            // in the current module
 
            if let Some(symbol) = ctx.symbols.get_symbol_by_name(SymbolScope::Module(root_id), poly_arg.value.as_bytes()) {
 
                // We have a conflict
 
                let module_source = &modules[root_id.index as usize].source;
 
                let introduction_span = symbol.variant.span_of_introduction(ctx.heap);
 
                return Err(ParseError::new_error_str_at_span(
 
                    module_source, poly_arg.span,
 
                    "This polymorphic argument conflicts with another symbol"
 
                ).with_info_str_at_span(
 
                    module_source, introduction_span,
 
                    "It conflicts due to this symbol"
 
                ));
 
            }
 
        }
 

	
 
        // All arguments are fine
 
        Ok(())
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Detecting type loops
 
    //--------------------------------------------------------------------------
 

	
 
    /// Internal function that will detect type loops and check if they're
 
    /// resolvable. If so then the appropriate union variants will be marked as
 
    /// "living on heap". If not then a `ParseError` will be returned
 
    fn detect_and_resolve_type_loops_for(&mut self, modules: &[Module], heap: &Heap, concrete_type: ConcreteType) -> Result<(), ParseError> {
 
    fn detect_and_resolve_type_loops_for(&mut self, modules: &[Module], heap: &Heap, arch: &TargetArch, concrete_type: ConcreteType) -> Result<(), ParseError> {
 
        // Programmer notes: what happens here is that we call
 
        // `check_member_for_type_loops` for a particular type's member, and
 
        // then take action using the return value:
 
        // 1. It might already be resolved: in this case it implies we don't
 
        //  have type loops, or they have been resolved.
 
        // 2. A new type is encountered. If so then it is added to the type loop
 
        //  breadcrumbs.
 
        // 3. A type loop is detected (implying the type is already resolved, or
 
        //  already exists in the type loop breadcrumbs).
 
        //
 
        // Using the breadcrumbs we incrementally check every member type of a
 
        // particular considered type (e.g. a struct field, tuple member), and
 
        // do the same as above. Note that when a breadcrumb is added we reserve
 
        // space in the monomorph storage, initialized to zero-values (i.e.
 
        // wrong values). The breadcrumbs keep track of how far along we are
 
        // with resolving the member types.
 
        //
 
        // At the end we may have some type loops. If they're unresolvable then
 
        // we throw an error. If there are no type loops or they are all
 
        // resolvable then we end up with a list of `encountered_types`. These
 
        // are then used by `lay_out_memory_for_encountered_types`.
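        //
        // As a hypothetical illustration (syntax is made up): resolving a
        // recursive union such as `union List { End, Cons(u32, List) }` pushes a
        // breadcrumb for `List`, recurses into the embedded `List` of the `Cons`
        // variant, and detects the loop back to the original breadcrumb. That
        // embedded member is then marked as living on the heap, which breaks the
        // loop.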
 
        debug_assert!(self.type_loop_breadcrumbs.is_empty());
 
        debug_assert!(self.type_loops.is_empty());
 
        debug_assert!(self.encountered_types.is_empty());
 

	
 
        // Push the initial breadcrumb
 
        let initial_breadcrumb = Self::check_member_for_type_loops(
 
            &self.type_loop_breadcrumbs, &self.definition_lookup, &self.mono_type_lookup,
 
            &mut self.mono_search_key, &concrete_type
 
        );
 

	
 
        if let TypeLoopResult::PushBreadcrumb(definition_id, concrete_type) = initial_breadcrumb {
 
            self.handle_new_breadcrumb_for_type_loops(definition_id, concrete_type);
 
            self.handle_new_breadcrumb_for_type_loops(arch, definition_id, concrete_type);
 
        } else {
 
            unreachable!();
 
        }
 
            unreachable!()
 
        };
 

	
 
        // Enter into the main resolving loop
 
        while !self.type_loop_breadcrumbs.is_empty() {
 
            // Because we might be modifying the breadcrumb array we need to
 
            let breadcrumb_idx = self.type_loop_breadcrumbs.len() - 1;
 
            let mut breadcrumb = self.type_loop_breadcrumbs[breadcrumb_idx].clone();
 

	
 
            let mono_type = &self.mono_types[breadcrumb.type_id.0 as usize];
 
            let resolve_result = match &mono_type.variant {
 
                MonoTypeVariant::Builtin => {
 
                    TypeLoopResult::TypeExists
 
                }
 
                MonoTypeVariant::Enum => {
 
                    TypeLoopResult::TypeExists
 
                },
 
                MonoTypeVariant::Union(monomorph) => {
 
                    let num_variants = monomorph.variants.len() as u32;
 
                    let mut union_result = TypeLoopResult::TypeExists;
 

	
 
                    'member_loop: while breadcrumb.next_member < num_variants {
 
                        let mono_variant = &monomorph.variants[breadcrumb.next_member as usize];
 
                        let num_embedded = mono_variant.embedded.len() as u32;
 

	
 
                        while breadcrumb.next_embedded < num_embedded {
 
                            let mono_embedded = &mono_variant.embedded[breadcrumb.next_embedded as usize];
 
                            union_result = Self::check_member_for_type_loops(
 
                                &self.type_loop_breadcrumbs, &self.definition_lookup, &self.mono_type_lookup,
 
                                &mut self.mono_search_key, &mono_embedded.concrete_type
 
                            );
 

	
 
                            if union_result != TypeLoopResult::TypeExists {
 
                                // In type loop or new breadcrumb pushed, so
 
                                // break out of the resolving loop
 
                                break 'member_loop;
 
                            }
 

	
 
                            breadcrumb.next_embedded += 1;
 
                        }
 

	
 
                        breadcrumb.next_embedded = 0;
 
                        breadcrumb.next_member += 1
 
                    }
 

	
 
                    union_result
 
                },
 
                MonoTypeVariant::Struct(monomorph) => {
 
                    let num_fields = monomorph.fields.len() as u32;
 

	
 
@@ -1214,97 +1229,97 @@ impl TypeTable {
 
                        struct_result = Self::check_member_for_type_loops(
 
                            &self.type_loop_breadcrumbs, &self.definition_lookup, &self.mono_type_lookup,
 
                            &mut self.mono_search_key, &mono_field.concrete_type
 
                        );
 

	
 
                        if struct_result != TypeLoopResult::TypeExists {
 
                            // Type loop or breadcrumb pushed, so break out of
 
                            // the resolving loop
 
                            break;
 
                        }
 

	
 
                        breadcrumb.next_member += 1;
 
                    }
 

	
 
                    struct_result
 
                },
 
                MonoTypeVariant::Procedure(_) => unreachable!(),
 
                MonoTypeVariant::Tuple(monomorph) => {
 
                    let num_members = monomorph.members.len() as u32;
 
                    let mut tuple_result = TypeLoopResult::TypeExists;
 

	
 
                    while breadcrumb.next_member < num_members {
 
                        let tuple_member = &monomorph.members[breadcrumb.next_member as usize];
 
                        tuple_result = Self::check_member_for_type_loops(
 
                            &self.type_loop_breadcrumbs, &self.definition_lookup, &self.mono_type_lookup,
 
                            &mut self.mono_search_key, &tuple_member.concrete_type
 
                        );
 

	
 
                        if tuple_result != TypeLoopResult::TypeExists {
 
                            break;
 
                        }
 

	
 
                        breadcrumb.next_member += 1;
 
                    }
 

	
 
                    tuple_result
 
                }
 
            };
 

	
 
            // Handle the result of attempting to resolve the current breadcrumb
 
            match resolve_result {
 
                TypeLoopResult::TypeExists => {
 
                    // We finished parsing the type
 
                    self.type_loop_breadcrumbs.pop();
 
                },
 
                TypeLoopResult::PushBreadcrumb(definition_id, concrete_type) => {
 
                    // We recurse into the member type.
 
                    self.type_loop_breadcrumbs[breadcrumb_idx] = breadcrumb;
 
                    self.handle_new_breadcrumb_for_type_loops(definition_id, concrete_type);
 
                    self.handle_new_breadcrumb_for_type_loops(arch, definition_id, concrete_type);
 
                },
 
                TypeLoopResult::TypeLoop(first_idx) => {
 
                    // Because we will be modifying breadcrumbs within the
 
                    // type-loop handling code, put back the modified breadcrumb
 
                    self.type_loop_breadcrumbs[breadcrumb_idx] = breadcrumb;
 

	
 
                    // We're in a type loop. Add the type loop
 
                    let mut loop_members = Vec::with_capacity(self.type_loop_breadcrumbs.len() - first_idx);
 
                    let mut contains_union = false;
 

	
 
                    for breadcrumb_idx in first_idx..self.type_loop_breadcrumbs.len() {
 
                        let breadcrumb = &mut self.type_loop_breadcrumbs[breadcrumb_idx];
 
                        let mut is_union = false;
 

	
 
                        // Check if type loop member is a union that may be
 
                        // broken up by moving some of its members to the heap.
 
                        let mono_type = &mut self.mono_types[breadcrumb.type_id.0 as usize];
 
                        if let MonoTypeVariant::Union(union_type) = &mut mono_type.variant {
 
                            // Mark the variant that caused the loop as heap
 
                            // allocated to break the type loop.
 
                            let variant = &mut union_type.variants[breadcrumb.next_member as usize];
 
                            variant.lives_on_heap = true;
 
                            breadcrumb.next_embedded += 1;
 

	
 
                            is_union = true;
 
                            contains_union = true;
 
                        } // else: we don't care about the type for now
 

	
 
                        loop_members.push(TypeLoopEntry{
 
                            type_id: breadcrumb.type_id,
 
                            is_union
 
                        });
 
                    }
 

	
 
                    let new_type_loop = TypeLoop{ members: loop_members };
 
                    if !contains_union {
 
                        // The loop contains no union that could break it, so return a
 
                        // type loop error. This is because otherwise our
 
                        // breadcrumb resolver ends up in an infinite loop.
 
                        return Err(construct_type_loop_error(
 
                            &self.mono_types, &new_type_loop, modules, heap
 
                        ));
 
                    }
 

	
 
                    self.type_loops.push(new_type_loop);
 
                }
 
            }
 
        }
 
@@ -1400,143 +1415,173 @@ impl TypeTable {
 
        }
 

	
 
        for type_loop in &self.type_loops {
 
            let mut can_be_broken = false;
 
            debug_assert!(!type_loop.members.is_empty());
 

	
 
            for entry in &type_loop.members {
 
                if entry.is_union {
 
                    let mono_type = self.mono_types[entry.type_id.0 as usize].variant.as_union();
 
                    debug_assert!(!mono_type.variants.is_empty()); // otherwise it couldn't be part of the type loop
 
                    let has_stack_variant = mono_type.variants.iter().any(|variant| !variant.lives_on_heap);
 
                    if has_stack_variant {
 
                        can_be_broken = true;
 
                        break;
 
                    }
 
                }
 
            }
 

	
 
            if !can_be_broken {
 
                // Construct a type loop error
 
                return Err(construct_type_loop_error(&self.mono_types, type_loop, modules, heap));
 
            }
 
        }
 

	
 
        // If here, then all type loops have been resolved and we can lay out
 
        // all of the members
 
        self.type_loops.clear();
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Checks if the specified type needs to be resolved (i.e. we need to push
 
    /// a breadcrumb), is already resolved (i.e. we can continue with the next
 
    /// member of the currently considered type) or is in the process of being
 
    /// resolved (i.e. we're in a type loop). Because of borrowing rules we
 
    /// don't do any modifications of internal types here. Hence: if we
 
    /// return `PushBreadcrumb` then call `handle_new_breadcrumb_for_type_loops`
 
    /// to take care of storing the appropriate types.
 
    fn check_member_for_type_loops(
 
        breadcrumbs: &[TypeLoopBreadcrumb], definition_map: &DefinitionMap, mono_type_map: &MonoTypeMap,
 
        mono_key: &mut MonoSearchKey, concrete_type: &ConcreteType
 
    ) -> TypeLoopResult {
 
        use ConcreteTypePart as CTP;
 

	
 
        // Depending on the type, lookup if the type has already been visited
 
        // (i.e. either already has its memory laid out, or is part of a type
 
        // loop because we've already visited the type)
 
        debug_assert!(!concrete_type.parts.is_empty());
 
        let (definition_id, type_id) = match &concrete_type.parts[0] {
 
            CTP::Tuple(_) => {
 
                Self::set_search_key_to_tuple(mono_key, definition_map, &concrete_type.parts);
 
                let type_id = mono_type_map.get(&mono_key).copied();
 
                (DefinitionId::new_invalid(), type_id)
 
            },
 
            CTP::Instance(definition_id, _) => {
 
                let definition_type = definition_map.get(definition_id).unwrap();
 
                mono_key.set(&concrete_type.parts, &definition_type.poly_vars);
 
                let type_id = mono_type_map.get(&mono_key).copied();
 

	
 
                (*definition_id, type_id)
 
            },
 
            CTP::Function(_, _) |
 
            CTP::Component(_, _) => {
 
                todo!("function pointers")
 
            },
 
            _ => {
 
                return TypeLoopResult::TypeExists
 
            },
 
        let definition_id = if let ConcreteTypePart::Instance(definition_id, _) = concrete_type.parts[0] {
 
            definition_id
 
        } else {
 
            DefinitionId::new_invalid()
 
        };
 

	
 
        if let Some(type_id) = type_id {
 
        Self::set_search_key_to_type(mono_key, definition_map, &concrete_type.parts);
 
        if let Some(type_id) = mono_type_map.get(mono_key).copied() {
 
            for (breadcrumb_idx, breadcrumb) in breadcrumbs.iter().enumerate() {
 
                if breadcrumb.type_id == type_id {
 
                    return TypeLoopResult::TypeLoop(breadcrumb_idx);
 
                }
 
            }
 

	
 
            return TypeLoopResult::TypeExists;
 
        }
 

	
 
        // Type is not yet known, so we need to insert it into the lookup and
 
        // push a new breadcrumb.
 
        return TypeLoopResult::PushBreadcrumb(definition_id, concrete_type.clone());
 
    }
 

	
 
    /// Handles the `PushBreadcrumb` result for a `check_member_for_type_loops`
 
    /// call. Will preallocate entries in the monomorphed type storage (with
 
    /// all memory properties zeroed).
 
    fn handle_new_breadcrumb_for_type_loops(&mut self, definition_id: DefinitionId, concrete_type: ConcreteType) {
 
    fn handle_new_breadcrumb_for_type_loops(&mut self, arch: &TargetArch, definition_id: DefinitionId, concrete_type: ConcreteType) {
 
        use DefinedTypeVariant as DTV;
 
        use ConcreteTypePart as CTP;
 

	
 
        let mut is_union = false;
 

	
 
        let type_id = match &concrete_type.parts[0] {
 
            // Builtin types
 
            CTP::Void | CTP::Message | CTP::Bool |
 
            CTP::UInt8 | CTP::UInt16 | CTP::UInt32 | CTP::UInt64 |
 
            CTP::SInt8 | CTP::SInt16 | CTP::SInt32 | CTP::SInt64 |
 
            CTP::Character | CTP::String |
 
            CTP::Array | CTP::Slice | CTP::Input | CTP::Output | CTP::Pointer => {
 
                // Insert the entry for the builtin type; we should be able to
 
                // immediately "steal" the size from the preinserted builtins.
 
                let base_type_id = match &concrete_type.parts[0] {
 
                    CTP::Void => arch.void_type_id,
 
                    CTP::Message => arch.message_type_id,
 
                    CTP::Bool => arch.bool_type_id,
 
                    CTP::UInt8 => arch.uint8_type_id,
 
                    CTP::UInt16 => arch.uint16_type_id,
 
                    CTP::UInt32 => arch.uint32_type_id,
 
                    CTP::UInt64 => arch.uint64_type_id,
 
                    CTP::SInt8 => arch.sint8_type_id,
 
                    CTP::SInt16 => arch.sint16_type_id,
 
                    CTP::SInt32 => arch.sint32_type_id,
 
                    CTP::SInt64 => arch.sint64_type_id,
 
                    CTP::Character => arch.char_type_id,
 
                    CTP::String => arch.string_type_id,
 
                    CTP::Array => arch.array_type_id,
 
                    CTP::Slice => arch.slice_type_id,
 
                    CTP::Input => arch.input_type_id,
 
                    CTP::Output => arch.output_type_id,
 
                    CTP::Pointer => arch.pointer_type_id,
 
                    _ => unreachable!(),
 
                };
 
                let base_type = &self.mono_types[base_type_id.0 as usize];
 

	
 
                let type_id = TypeId(self.mono_types.len() as i64);
 
                Self::set_search_key_to_type(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
                self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
                self.mono_types.push(MonoType{
 
                    type_id,
 
                    concrete_type,
 
                    size: base_type.size,
 
                    alignment: base_type.alignment,
 
                    variant: MonoTypeVariant::Builtin
 
                });
 

	
 
                type_id
 
            },
 
            // User-defined types
 
            CTP::Tuple(num_embedded) => {
 
                debug_assert!(definition_id.is_invalid()); // because tuples do not have an associated `DefinitionId`
 
                let mut members = Vec::with_capacity(*num_embedded as usize);
 
                for section in ConcreteTypeIter::new(&concrete_type.parts, 0) {
 
                    members.push(TupleMonomorphMember{
 
                        type_id: TypeId::new_invalid(),
 
                        concrete_type: ConcreteType{ parts: Vec::from(section) },
 
                        size: 0,
 
                        alignment: 0,
 
                        offset: 0
 
                    });
 
                }
 

	
 
                let type_id = TypeId(self.mono_types.len() as i64);
 
                Self::set_search_key_to_tuple(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
                self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
                self.mono_types.push(MonoType::new_empty(type_id, concrete_type, MonoTypeVariant::Tuple(TupleMonomorph{ members })));
 

	
 
                type_id
 
            },
 
            CTP::Instance(_check_definition_id, _) => {
 
                debug_assert_eq!(definition_id, *_check_definition_id); // because this is how `definition_id` was determined
 

	
 
                Self::set_search_key_to_type(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
                let base_type = self.definition_lookup.get(&definition_id).unwrap();
 
                let type_id = match &base_type.definition {
 
                    DTV::Enum(definition) => {
 
                        // The enum is a bit exceptional in that when we insert
 
                        // it we will immediately set its size/alignment:
 
                        // there is nothing to compute here.
 
                        debug_assert!(definition.size != 0 && definition.alignment != 0);
 
                        let type_id = TypeId(self.mono_types.len() as i64);
 
                        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
                        self.mono_types.push(MonoType::new_empty(type_id, concrete_type, MonoTypeVariant::Enum));
 

	
 
                        let mono_type = &mut self.mono_types[type_id.0 as usize];
 
                        mono_type.size = definition.size;
 
                        mono_type.alignment = definition.alignment;
 

	
 
                        type_id
 
                    },
 
                    DTV::Union(definition) => {
 
                        // Create all the variants with their concrete types
 
                        let mut mono_variants = Vec::with_capacity(definition.variants.len());
 
                        for poly_variant in &definition.variants {
 
                            let mut mono_embedded = Vec::with_capacity(poly_variant.embedded.len());
 
                            for poly_embedded in &poly_variant.embedded {
 
                                let mono_concrete = Self::construct_concrete_type(poly_embedded, &concrete_type);
 
@@ -1554,190 +1599,192 @@ impl TypeTable {
 
                                embedded: mono_embedded,
 
                            })
 
                        }
 

	
 
                        let type_id = TypeId(self.mono_types.len() as i64);
 
                        let tag_size = definition.tag_size;
 
                        Self::set_search_key_to_type(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
                        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
                        self.mono_types.push(MonoType::new_empty(type_id, concrete_type, MonoTypeVariant::Union(UnionMonomorph{
 
                            variants: mono_variants,
 
                            tag_size,
 
                            heap_size: 0,
 
                            heap_alignment: 0,
 
                        })));
 

	
 
                        is_union = true;
 
                        type_id
 
                    },
 
                    DTV::Struct(definition) => {
 
                        // Create fields
 
                        let mut mono_fields = Vec::with_capacity(definition.fields.len());
 
                        for poly_field in &definition.fields {
 
                            let mono_concrete = Self::construct_concrete_type(&poly_field.parser_type, &concrete_type);
 
                            mono_fields.push(StructMonomorphField{
 
                                type_id: TypeId::new_invalid(),
 
                                concrete_type: mono_concrete,
 
                                size: 0,
 
                                alignment: 0,
 
                                offset: 0
 
                            })
 
                        }
 

	
 
                        let type_id = TypeId(self.mono_types.len() as i64);
 
                        Self::set_search_key_to_type(&mut self.mono_search_key, &self.definition_lookup, &concrete_type.parts);
 
                        self.mono_type_lookup.insert(self.mono_search_key.clone(), type_id);
 
                        self.mono_types.push(MonoType::new_empty(type_id, concrete_type, MonoTypeVariant::Struct(StructMonomorph{
 
                            fields: mono_fields,
 
                        })));
 

	
 
                        type_id
 
                    },
 
                    DTV::Procedure(_) => {
 
                        unreachable!("pushing type resolving breadcrumb for procedure type")
 
                    },
 
                };
 

	
 
                type_id
 
            },
 
            _ => unreachable!(),
 
            CTP::Function(_, _) | CTP::Component(_, _) => todo!("function pointers"),
 
        };
 

	
 
        self.encountered_types.push(TypeLoopEntry{ type_id, is_union });
 
        self.type_loop_breadcrumbs.push(TypeLoopBreadcrumb{
 
            type_id,
 
            next_member: 0,
 
            next_embedded: 0,
 
        });
 
    }
 

	
 
    /// Constructs a concrete type out of a parser type for a struct field or
 
    /// union embedded type. It will do this by looking up the polymorphic
 
    /// variables in the supplied concrete type. The assumption is that the
 
    /// polymorphic variable's indices correspond to the subtrees in the
 
    /// concrete type.
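    ///
    /// As a rough, hypothetical example (the names are illustrative): for a
    /// member written as `T[]` inside a container instantiated as
    /// `Pair<u32, string>`, the polymorphic argument at index 0 is substituted,
    /// producing the concrete parts `[Array, UInt32]`.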
 
    fn construct_concrete_type(member_type: &ParserType, container_type: &ConcreteType) -> ConcreteType {
 
        use ParserTypeVariant as PTV;
 
        use ConcreteTypePart as CTP;
 

	
 
        // TODO: Combine with code in pass_typing.rs
 
        fn parser_to_concrete_part(part: &ParserTypeVariant) -> Option<ConcreteTypePart> {
 
            match part {
 
                PTV::Void      => Some(CTP::Void),
 
                PTV::Message   => Some(CTP::Message),
 
                PTV::Bool      => Some(CTP::Bool),
 
                PTV::UInt8     => Some(CTP::UInt8),
 
                PTV::UInt16    => Some(CTP::UInt16),
 
                PTV::UInt32    => Some(CTP::UInt32),
 
                PTV::UInt64    => Some(CTP::UInt64),
 
                PTV::SInt8     => Some(CTP::SInt8),
 
                PTV::SInt16    => Some(CTP::SInt16),
 
                PTV::SInt32    => Some(CTP::SInt32),
 
                PTV::SInt64    => Some(CTP::SInt64),
 
                PTV::Character => Some(CTP::Character),
 
                PTV::String    => Some(CTP::String),
 
                PTV::Array     => Some(CTP::Array),
 
                PTV::Input     => Some(CTP::Input),
 
                PTV::Output    => Some(CTP::Output),
 
                PTV::Tuple(num) => Some(CTP::Tuple(*num)),
 
                PTV::Definition(definition_id, num) => Some(CTP::Instance(*definition_id, *num)),
 
                _              => None
 
            }
 
        }
 

	
 
        let mut parts = Vec::with_capacity(member_type.elements.len()); // usually a correct estimation, might not be
 
        for member_part in &member_type.elements {
 
            // Check if we have a regular builtin type
 
            if let Some(part) = parser_to_concrete_part(&member_part.variant) {
 
                parts.push(part);
 
                continue;
 
            }
 

	
 
            // Not builtin, but if all code is working correctly, we only care
 
            // about the polymorphic argument at this point.
 
            if let PTV::PolymorphicArgument(_container_definition_id, poly_arg_idx) = member_part.variant {
 
                debug_assert_eq!(_container_definition_id, get_concrete_type_definition(&container_type.parts).unwrap());
 

	
 
                let mut container_iter = container_type.embedded_iter(0);
 
                for _ in 0..poly_arg_idx {
 
                    container_iter.next();
 
                }
 

	
 
                let poly_section = container_iter.next().unwrap();
 
                parts.extend(poly_section);
 

	
 
                continue;
 
            }
 

	
 
            unreachable!("unexpected type part {:?} from {:?}", member_part, member_type);
 
        }
 

	
 
        return ConcreteType{ parts };
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Determining memory layout for types
 
    //--------------------------------------------------------------------------
 

	
 
    /// Should be called after type loops are detected (and resolved
 
    /// successfully). As a result of this call we expect the
 
    /// `encountered_types` array to be filled. We'll calculate size/alignment/
 
    /// offset values for those types in this routine.
 
    fn lay_out_memory_for_encountered_types(&mut self, arch: &TargetArch) {
 
        // Programmers note: this works like a little stack machine. We have
 
        // memory layout breadcrumbs which, like the type loop breadcrumbs, keep
 
        // track of the currently considered member type. This breadcrumb also
 
        // stores an index into the `size_alignment_stack`, which will be used
 
        // to store intermediate size/alignment pairs until all members are
 
        // resolved. Note that this `size_alignment_stack` is NOT an
 
        // optimization, we're working around borrowing rules here.
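        //
        // (Sketch of the intent, only partially visible in this hunk: every
        // resolved member pushes its (size, alignment) pair onto
        // `size_alignment_stack`, and a breadcrumb's `first_size_alignment_idx`
        // records where that type's pairs start, so the final offsets can be
        // computed once all of its members are known.)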
 

	
 
        // Just finished type loop detection, so we're left with the encountered
 
        // types only
 
        // types only. If we don't have any (a builtin type's monomorph was
 
        // added to the type table) then this function shouldn't be called at
 
        // all.
 
        debug_assert!(self.type_loops.is_empty());
 
        debug_assert!(!self.encountered_types.is_empty());
 
        debug_assert!(self.memory_layout_breadcrumbs.is_empty());
 
        debug_assert!(self.size_alignment_stack.is_empty());
 

	
 
        let (ptr_size, ptr_align) = self.mono_types[arch.pointer_type_id.0 as usize].get_size_alignment().unwrap();
 

	
 
        // Push the first entry (the type we originally started with when we
 
        // were detecting type loops)
 
        let first_entry = &self.encountered_types[0];
 
        self.memory_layout_breadcrumbs.push(MemoryBreadcrumb{
 
            type_id: first_entry.type_id,
 
            next_member: 0,
 
            next_embedded: 0,
 
            first_size_alignment_idx: 0,
 
        });
 

	
 
        // Enter the main resolving loop
 
        'breadcrumb_loop: while !self.memory_layout_breadcrumbs.is_empty() {
 
            let cur_breadcrumb_idx = self.memory_layout_breadcrumbs.len() - 1;
 
            let mut breadcrumb = self.memory_layout_breadcrumbs[cur_breadcrumb_idx].clone();
 

	
 
            let mono_type = &self.mono_types[breadcrumb.type_id.0 as usize];
 
            match &mono_type.variant {
 
                MonoTypeVariant::Builtin | MonoTypeVariant::Enum => {
 
                    // Size should already be computed
 
                    dbg_code!({
 
                        let mono_type = &self.mono_types[breadcrumb.type_id.0 as usize];
 
                        debug_assert!(mono_type.size != 0 && mono_type.alignment != 0);
 
                    });
 
                },
 
                MonoTypeVariant::Union(mono_type) => {
 
                    // Retrieve size/alignment of each embedded type. We do not
 
                    // compute the offsets or total type sizes yet.
 
                    let num_variants = mono_type.variants.len() as u32;
 
                    while breadcrumb.next_member < num_variants {
 
                        let mono_variant = &mono_type.variants[breadcrumb.next_member as usize];
 

	
 
                        if mono_variant.lives_on_heap {
 
                            // To prevent type loops we made this a heap-
 
                            // allocated variant. This implies we cannot
 
                            // compute sizes of members at this point.
 
                        } else {
 
                            let num_embedded = mono_variant.embedded.len() as u32;
 
                            while breadcrumb.next_embedded < num_embedded {
 
                                let mono_embedded = &mono_variant.embedded[breadcrumb.next_embedded as usize];
 
                                let layout_result = Self::get_memory_layout_or_breadcrumb(
 
                                    &self.definition_lookup, &self.mono_type_lookup, &self.mono_types,
 
@@ -2072,111 +2119,122 @@ impl TypeTable {
 
        let (part, size) = if min_val >= 0 {
 
            // Can be an unsigned integer
 
            if max_val <= (u8::MAX as i64) {
 
                (ConcreteTypePart::UInt8, 1)
 
            } else if max_val <= (u16::MAX as i64) {
 
                (ConcreteTypePart::UInt16, 2)
 
            } else if max_val <= (u32::MAX as i64) {
 
                (ConcreteTypePart::UInt32, 4)
 
            } else {
 
                (ConcreteTypePart::UInt64, 8)
 
            }
 
        } else {
 
            // Must be a signed integer
 
            if min_val >= (i8::MIN as i64) && max_val <= (i8::MAX as i64) {
 
                (ConcreteTypePart::SInt8, 1)
 
            } else if min_val >= (i16::MIN as i64) && max_val <= (i16::MAX as i64) {
 
                (ConcreteTypePart::SInt16, 2)
 
            } else if min_val >= (i32::MIN as i64) && max_val <= (i32::MAX as i64) {
 
                (ConcreteTypePart::SInt32, 4)
 
            } else {
 
                (ConcreteTypePart::SInt64, 8)
 
            }
 
        };
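        // For example: min_val = -3 and max_val = 200 cannot be unsigned, and 200
        // does not fit in an SInt8 (it exceeds 127), so SInt16 with size 2 is
        // selected.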
 

	
 
        return (ConcreteType{ parts: vec![part] }, size);
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Small utilities
 
    //--------------------------------------------------------------------------
 

	
 
    fn create_polymorphic_variables(variables: &[Identifier]) -> Vec<PolymorphicVariable> {
 
        let mut result = Vec::with_capacity(variables.len());
 
        for variable in variables.iter() {
 
            result.push(PolymorphicVariable{ identifier: variable.clone(), is_in_use: false });
 
        }
 

	
 
        result
 
    }
 

	
 
    fn mark_used_polymorphic_variables(poly_vars: &mut Vec<PolymorphicVariable>, parser_type: &ParserType) {
 
        for element in &parser_type.elements {
 
            if let ParserTypeVariant::PolymorphicArgument(_, idx) = &element.variant {
 
                poly_vars[*idx as usize].is_in_use = true;
 
            }
 
        }
 
    }
 

	
 
    /// Sets the search key. If `false` is returned then the provided type is a
 
    /// builtin type. If `true` is returned then we're dealing with a user-
 
    /// defined type.
 
    fn set_search_key_to_type(search_key: &mut MonoSearchKey, definition_map: &DefinitionMap, type_parts: &[ConcreteTypePart]) -> bool {
 
    /// Sets the search key to a specific type.
 
    fn set_search_key_to_type(search_key: &mut MonoSearchKey, definition_map: &DefinitionMap, type_parts: &[ConcreteTypePart]) {
 
        use ConcreteTypePart as CTP;
 

	
 
        match type_parts[0] {
 
            ConcreteTypePart::Tuple(_) => {
 
            // Builtin types without any embedded types
 
            CTP::Void | CTP::Message | CTP::Bool |
 
            CTP::UInt8 | CTP::UInt16 | CTP::UInt32 | CTP::UInt64 |
 
            CTP::SInt8 | CTP::SInt16 | CTP::SInt32 | CTP::SInt64 |
 
            CTP::Character | CTP::String => {
 
                debug_assert_eq!(type_parts.len(), 1);
 
                search_key.set_top_type(type_parts[0]);
 
            },
 
            // Builtin types with a single nested type
 
            CTP::Array | CTP::Slice | CTP::Input | CTP::Output | CTP::Pointer => {
 
                debug_assert_eq!(type_parts.len(), 2);
 
                search_key.set(type_parts, &POLY_VARS_IN_USE[..1])
 
            },
 
            // User-defined types
 
            CTP::Tuple(_) => {
 
                Self::set_search_key_to_tuple(search_key, definition_map, type_parts);
 
                return true;
 
            },
 
            ConcreteTypePart::Instance(definition_id, _) => {
 
            CTP::Instance(definition_id, _) => {
 
                let definition_type = definition_map.get(&definition_id).unwrap();
 
                search_key.set(type_parts, &definition_type.poly_vars);
 
                return true;
 
            },
 
            ConcreteTypePart::Function(_, _) | ConcreteTypePart::Component(_, _) => {
 
            CTP::Function(_, _) | CTP::Component(_, _) => {
 
                todo!("implement function pointers")
 
            },
 
            _ => return false,
 
        }
 
    }
 

	
 
    fn set_search_key_to_tuple(search_key: &mut MonoSearchKey, definition_map: &DefinitionMap, type_parts: &[ConcreteTypePart]) {
 
        dbg_code!({
 
            let is_tuple = if let ConcreteTypePart::Tuple(_) = type_parts[0] { true } else { false };
 
            assert!(is_tuple);
 
        });
 
        search_key.set_top_type(type_parts[0]);
 
        for subtree in ConcreteTypeIter::new(type_parts, 0) {
 
            if let Some(definition_id) = get_concrete_type_definition(subtree) {
 
                // A definition, so retrieve poly var usage info
 
                let definition_type = definition_map.get(&definition_id).unwrap();
 
                search_key.push_subtree(subtree, &definition_type.poly_vars);
 
            } else {
 
                // Not a definition, so all type information is important
 
                search_key.push_subtype(subtree, true);
 
            }
 
        }
 
    }
 
}
 

	
 
#[inline]
 
fn align_offset_to(offset: &mut usize, alignment: usize) {
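    // Rounds `offset` up to the next multiple of `alignment`, assuming the
    // alignment is a power of two: add `alignment - 1`, then mask away the low
    // bits (e.g. an offset of 5 with an alignment of 4 becomes 8).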
 
    debug_assert!(alignment > 0);
 
    let alignment_min_1 = alignment - 1;
 
    *offset += alignment_min_1;
 
    *offset &= !(alignment_min_1);
 
}
 

	
 
#[inline]
 
fn get_concrete_type_definition(concrete_parts: &[ConcreteTypePart]) -> Option<DefinitionId> {
 
    match concrete_parts[0] {
 
        ConcreteTypePart::Instance(definition_id, _) => {
 
            return Some(definition_id)
 
        },
 
        ConcreteTypePart::Function(definition_id, _) |
 
        ConcreteTypePart::Component(definition_id, _) => {
 
            return Some(definition_id.upcast());
 
        },
 
        _ => {
 
            return None;
 
        },
 
    }
 
}
 
\ No newline at end of file
src/protocol/tests/utils.rs
Show inline comments
 
@@ -743,128 +743,128 @@ impl<'a> VariableTester<'a> {
 
            "[{}] Expected parser type '{}', but got '{}' for {}",
 
            self.ctx.test_name, expected, &serialized, self.assert_postfix()
 
        );
 
        self
 
    }
 

	
 
    pub(crate) fn assert_concrete_type(self, expected: &str) -> Self {
 
        // Lookup concrete type in type table
 
        let mono_proc = get_procedure_monomorph(&self.ctx.heap, &self.ctx.types, self.definition_id);
 
        let mono_index = mono_proc.monomorph_index;
 
        let mono_data = &self.ctx.heap[self.definition_id].as_procedure().monomorphs[mono_index as usize];
 
        let expr_info = &mono_data.expr_info[self.var_expr.type_index as usize];
 
        let concrete_type = &self.ctx.types.get_monomorph(expr_info.type_id).concrete_type;
 

	
 
        // Serialize and check
 
        let serialized = concrete_type.display_name(self.ctx.heap);
 

	
 
        assert_eq!(
 
            expected, &serialized,
 
            "[{}] Expected concrete type '{}', but got '{}' for {}",
 
            self.ctx.test_name, expected, &serialized, self.assert_postfix()
 
        );
 
        self
 
    }
 

	
 
    fn assert_postfix(&self) -> String {
 
        format!("Variable{{ name: {} }}", self.variable.identifier.value.as_str())
 
    }
 
}
 

	
 
pub(crate) struct ExpressionTester<'a> {
 
    ctx: TestCtx<'a>,
 
    definition_id: DefinitionId, // of the enclosing function/component
 
    expr: &'a Expression
 
}
 

	
 
impl<'a> ExpressionTester<'a> {
 
    fn new(
 
        ctx: TestCtx<'a>, definition_id: DefinitionId, expr: &'a Expression
 
    ) -> Self {
 
        Self{ ctx, definition_id, expr }
 
    }
 

	
 
    pub(crate) fn assert_concrete_type(self, expected: &str) -> Self {
 
        // Lookup concrete type
 
        let mono_proc = get_procedure_monomorph(&self.ctx.heap, &self.ctx.types, self.definition_id);
 
        let mono_index = mono_proc.monomorph_index;
 
        let mono_data = &self.ctx.heap[self.definition_id].as_procedure().monomorphs[mono_index as usize];
 
        let expr_info = &mono_data.expr_info[self.var_expr.type_index as usize];
 
        let expr_info = &mono_data.expr_info[self.expr.type_index() as usize];
 
        let concrete_type = &self.ctx.types.get_monomorph(expr_info.type_id).concrete_type;
 

	
 
        // Serialize and check type
 
        let serialized = concrete_type.display_name(self.ctx.heap);
 

	
 
        assert_eq!(
 
            expected, &serialized,
 
            "[{}] Expected concrete type '{}', but got '{}' for {}",
 
            self.ctx.test_name, expected, &serialized, self.assert_postfix()
 
        );
 
        self
 
    }
 

	
 
    fn assert_postfix(&self) -> String {
 
        format!(
 
            "Expression{{ debug: {:?} }}",
 
            self.expr
 
        )
 
    }
 
}
 

	
 
fn get_procedure_monomorph<'a>(heap: &Heap, types: &'a TypeTable, definition_id: DefinitionId) -> &'a ProcedureMonomorph {
 
    let ast_definition = heap[definition_id].as_procedure();
 
    let func_type = if ast_definition.kind == ProcedureKind::Function {
 
        [ConcreteTypePart::Function(ast_definition.this, 0)]
 
    } else {
 
        [ConcreteTypePart::Component(ast_definition.this, 0)]
 
    };
 

	
 
    let mono_index = types.get_procedure_monomorph_type_id(&definition_id, &func_type).unwrap();
 
    let mono_data = types.get_procedure_monomorph(mono_index);
 
    let mono_data = types.get_monomorph(mono_index).variant.as_procedure();
 

	
 
    mono_data
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Interface for failed compilation
 
//------------------------------------------------------------------------------
 

	
 
pub(crate) struct AstErrTester {
 
    test_name: String,
 
    error: ParseError,
 
}
 

	
 
impl AstErrTester {
 
    fn new(test_name: String, error: ParseError) -> Self {
 
        Self{ test_name, error }
 
    }
 

	
 
    pub(crate) fn error<F: Fn(ErrorTester)>(&self, f: F) {
 
        // Maybe multiple errors will be supported in the future
 
        let tester = ErrorTester{ test_name: &self.test_name, error: &self.error };
 
        f(tester)
 
    }
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Utilities for failed compilation
 
//------------------------------------------------------------------------------
 

	
 
pub(crate) struct ErrorTester<'a> {
 
    test_name: &'a str,
 
    error: &'a ParseError,
 
}
 

	
 
impl<'a> ErrorTester<'a> {
 
    pub(crate) fn assert_num(self, num: usize) -> Self {
 
        assert_eq!(
 
            num, self.error.statements.len(),
 
            "[{}] expected error to consist of '{}' parts, but encountered '{}' for {}",
 
            self.test_name, num, self.error.statements.len(), self.assert_postfix()
 
        );
 

	
 
        self
 
    }
 

	
 
    pub(crate) fn assert_ctx_has(self, idx: usize, msg: &str) -> Self {
 
        assert!(
 
            self.error.statements[idx].context.contains(msg),
src/runtime2/store/component.rs
Show inline comments
 
@@ -489,84 +489,84 @@ mod tests {
 
        const NUM_OPERATIONS: usize = 1024;
 
        const NUM_OPS_PER_THREAD: usize = NUM_OPERATIONS / NUM_THREADS;
 
        const NUM_OPS_PER_ROUND: usize = NUM_OPS_PER_THREAD / NUM_ROUNDS;
 
        const NUM_STORED_PER_THREAD: usize = 32;
 

	
 
        assert!(NUM_OPERATIONS % NUM_THREADS == 0);
 
        assert!(NUM_OPS_PER_THREAD / 2 > NUM_STORED_PER_THREAD);
 

	
 
        let seeds = seeds();
 
        for seed_index in 0..seeds.len() {
 
            // Setup store, counters and threads
 
            let store = Arc::new(ComponentStore::new(16));
 
            let counters = Counters::new();
 

	
 
            let mut threads = Vec::with_capacity(NUM_THREADS);
 
            for thread_index in 0..NUM_THREADS {
 
                // Setup local clones to move into the thread
 
                let store = store.clone();
 
                let counters = counters.clone();
 

	
 
                // Setup local rng
 
                let mut seed = seeds[seed_index];
 
                for seed_val_idx in 0..16 {
 
                    seed[seed_val_idx] ^= thread_index as u8; // blegh
 
                }
 
                let mut rng = Pcg32::from_seed(seed);
 

	
 
                let handle = std::thread::spawn(move || {
 
                    let mut stored = Vec::with_capacity(NUM_STORED_PER_THREAD);
 

	
 
                    for _round_index in 0..NUM_ROUNDS {
 
                        // Modify elements in the store randomly, for some
 
                        // silly definition of random
 
                        for _op_index in 0..NUM_OPS_PER_ROUND {
 
                            // Perform a single operation, depending on the current

                            // number of values owned by this thread
 
                            let new_value = rng.next_u64();
 
                            let should_create = rng.next_u32() % 2 == 0;
 
                            let is_empty = stored.is_empty();
 
                            let is_full = stored.len() == NUM_STORED_PER_THREAD;
 

	
 
                            if is_empty || (!is_full && should_create) {
 
                                // Must create
 
                                let el_index = store.create(Resource::new(&counters, new_value));
 
                                stored.push((el_index, new_value));
 
                            } else {
 
                                // Must destroy
 
                                let stored_index = new_value as usize % stored.len();
 
                                let (el_index, el_value) = stored.remove(stored_index);
 
                                let (el_index, _el_value) = stored.remove(stored_index);
 
                                store.destroy(el_index);
 
                            }
 
                        }
 

	
 
                        // Checking if the values we own still make sense
 
                        for (el_index, value) in stored.iter().copied() {
 
                            let gotten = store.get(el_index);
 
                            assert_eq!(value, gotten.val, "failed at thread {} value {}", thread_index, el_index);
 
                        }
 
                    }
 

	
 
                    return stored.len(); // return number of remaining elements
 
                });
 
                threads.push(handle);
 
            }
 

	
 
            // Done with the current round
 
            let mut total_left_allocated = 0;
 
            for thread in threads {
 
                let num_still_stored = thread.join().unwrap();
 
                total_left_allocated += num_still_stored as u64;
 
            }
 

	
 
            // Before store is dropped
 
            // note: cannot determine number of creations, creation/destructor
 
            // is random
 
            let num_ctor = counters.ctor.load(Ordering::Acquire);
 
            assert_dtor_eq!(counters, num_ctor - total_left_allocated);
 

	
 
            // After store is dropped
 
            drop(store);
 
            assert_dtor_eq!(counters, num_ctor);
 
        }
 
    }
 
}
 
\ No newline at end of file
src/runtime2/tests/mod.rs
Show inline comments
 
use crate::protocol::*;
 
use crate::protocol::eval::*;
 
use crate::runtime2::runtime::*;
 
use crate::runtime2::component::{CompCtx, CompPDL};
 

	
 
fn create_component(rt: &Runtime, module_name: &str, routine_name: &str, args: ValueGroup) {
 
    let prompt = rt.inner.protocol.new_component(
 
        module_name.as_bytes(), routine_name.as_bytes(), args
 
    ).expect("create prompt");
 
    let reserved = rt.inner.start_create_pdl_component();
 
    let ctx = CompCtx::new(&reserved);
 
    let (key, _) = rt.inner.finish_create_pdl_component(reserved, CompPDL::new(prompt, 0), ctx, false);
 
    rt.inner.enqueue_work(key);
 
}
 

	
 
fn no_args() -> ValueGroup { ValueGroup::new_stack(Vec::new()) }
 

	
 
#[test]
 
fn test_component_creation() {
 
    let pd = ProtocolDescription::parse(b"
 
    primitive nothing_at_all() {
 
        s32 a = 5;
 
        auto b = 5 + a;
 
    }
 
    ").expect("compilation");
 
    let rt = Runtime::new(1, true, pd);
 

	
 
    for i in 0..20 {
 
    for _i in 0..20 {
 
        create_component(&rt, "", "nothing_at_all", no_args());
 
    }
 
}
 

	
 
#[test]
 
fn test_component_communication() {
 
    let pd = ProtocolDescription::parse(b"
 
    primitive sender(out<u32> o, u32 outside_loops, u32 inside_loops) {
 
        u32 outside_index = 0;
 
        while (outside_index < outside_loops) {
 
            u32 inside_index = 0;
 
            sync while (inside_index < inside_loops) {
 
                put(o, inside_index);
 
                inside_index += 1;
 
            }
 
            outside_index += 1;
 
        }
 
    }
 

	
 
    primitive receiver(in<u32> i, u32 outside_loops, u32 inside_loops) {
 
        u32 outside_index = 0;
 
        while (outside_index < outside_loops) {
 
            u32 inside_index = 0;
 
            sync while (inside_index < inside_loops) {
 
                auto val = get(i);
 
                while (val != inside_index) {} // infinite loop if incorrect value is received
 
                inside_index += 1;
 
            }
 
            outside_index += 1;
 
        }
 
    }
 

	
 
    composite constructor() {
 
        channel o_orom -> i_orom;
 
        channel o_mrom -> i_mrom;
 
        channel o_ormm -> i_ormm;
 
        channel o_mrmm -> i_mrmm;
 

	
 
        // one round, one message per round
 
        new sender(o_orom, 1, 1);
 
        new receiver(i_orom, 1, 1);
 

	
 
        // multiple rounds, one message per round
 
        new sender(o_mrom, 5, 1);
 
        new receiver(i_mrom, 5, 1);
 

	
 
        // one round, multiple messages per round
 
        new sender(o_ormm, 1, 5);
0 comments (0 inline, 0 general)