Changeset - 2451a3ca7e4d
MH <contact@maxhenger.nl> - 2022-02-14 18:39:50
WIP: Refactored most of type table, pending one bugfix
6 files changed:
src/collections/raw_vec.rs
 
use std::{mem, ptr, cmp};
 
use std::alloc::{Layout, alloc, dealloc};
 

	
 
#[derive(Debug)]
 
enum AllocError {
 
    CapacityOverflow,
 
}
 

	
 
/// Generic raw vector. It has a base pointer, a capacity and a length. Basic
 
/// operations are supported, but the user of the structure is responsible for
 
/// ensuring that no illegal mutable access occurs.
 
/// A lot of the logic is simply stolen from the std lib. The destructor will
 
/// free the backing memory, but will not run any destructors.
 
/// Prefer using the provided functions to modify the length, but feel free to
/// modify it directly if you know what you're doing.
 
pub struct RawVec<T: Sized> {
 
    base: *mut T,
 
    cap: usize,
 
    pub len: usize,
 
}
 

	
 
impl<T: Sized> RawVec<T> {
 
    const T_ALIGNMENT: usize = mem::align_of::<T>();
 
    const T_SIZE: usize = mem::size_of::<T>();
 
    
 
    const GROWTH_RATE: usize = 2;
 

	
 
    pub fn new() -> Self {
 
        Self{
 
            base: ptr::null_mut(),
 
            cap: 0,
 
            len: 0,
 
        }
 
    }
 

	
 
    pub fn with_capacity(capacity: usize) -> Self {
 
        // Could be done a bit more efficiently
 
        let mut result = Self::new();
 
        result.ensure_space(capacity).unwrap();
 
        return result;
 
    }
 

	
 
    #[inline]
 
    pub unsafe fn get(&self, idx: usize) -> *const T {
 
        debug_assert!(idx < self.len);
 
        return self.base.add(idx);
 
    }
 

	
 
    #[inline]
 
    pub unsafe fn get_mut(&self, idx: usize) -> *mut T {
 
        debug_assert!(idx < self.len);
 
        return self.base.add(idx);
 
    }
 

	
 
    /// Pushes a new element to the end of the list.
 
    pub fn push(&mut self, item: T) {
 
        self.ensure_space(1).unwrap();
 
        unsafe {
 
            let target = self.base.add(self.len);
 
            std::ptr::write(target, item);
 
            self.len += 1;
 
        }
 
    }
 

	
 
    /// Moves the elements in the range [from_idx, from_idx + num_to_move) to
    /// the range [to_idx, to_idx + num_to_move). The caller must ensure that any
    /// initialized elements in the destination range that do not overlap the
    /// source range have had their destructors run.
 
    pub fn move_range(&mut self, from_idx: usize, to_idx: usize, num_to_move: usize) {
 
        debug_assert!(from_idx + num_to_move <= self.len);
 
        debug_assert!(to_idx + num_to_move <= self.len); // maybe not in future, for now this is fine
 
        unsafe {
 
            let source = self.base.add(from_idx);
 
            let target = self.base.add(to_idx);
 
            std::ptr::copy(source, target, num_to_move);
 
        }
 
    }
 

	
 
    pub fn len(&self) -> usize {
 
        return self.len;
 
    }
 

	
 
    pub fn as_slice(&self) -> &[T] {
 
        return unsafe{
 
            std::slice::from_raw_parts(self.base, self.len)
 
        };
 
    }
 

	
 
    fn ensure_space(&mut self, additional: usize) -> Result<(), AllocError>{
 
        debug_assert!(Self::T_SIZE != 0);
 
        debug_assert!(self.cap >= self.len);
 
        if self.cap - self.len < additional {
 
            // Need to resize. Note that due to all checked conditions we have
 
            // that new_cap >= 1.
 
            debug_assert!(additional > 0);
 
            let new_cap = self.len.checked_add(additional).unwrap();
 
            let new_cap = cmp::max(new_cap, self.cap * Self::GROWTH_RATE);
 

	
 
            let layout = Layout::array::<T>(new_cap)
 
                .map_err(|_| AllocError::CapacityOverflow)?;
 
            debug_assert_eq!(new_cap * Self::T_SIZE, layout.size());
 

	
 
            unsafe {
 
                // Allocate new storage, transfer bits, deallocate old store
 
                let new_base = alloc(layout);
 

	
 
                if self.cap > 0 {
 
                    let old_base = self.base as *mut u8;
 
                    let (old_size, old_layout) = self.current_layout();
 

	
 
                    ptr::copy_nonoverlapping(old_base, new_base, old_size);
 
                    dealloc(old_base, old_layout);
 
                }
 

	
 
                self.base = new_base as *mut T;
 
                self.cap = new_cap;
 
            }
 
        } // else: still enough space
 

	
 
        return Ok(());
 
    }
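    // Editor's illustrative sketch (not part of this changeset) of the growth policy
    // used by `ensure_space` above: the new capacity is the larger of the minimum
    // required capacity and GROWTH_RATE (2) times the old capacity.
    #[allow(dead_code)]
    fn grown_capacity_sketch(len: usize, cap: usize, additional: usize) -> usize {
        let minimum = len.checked_add(additional).unwrap(); // panics on overflow, as above
        cmp::max(minimum, cap * Self::GROWTH_RATE) // e.g. len = 2, cap = 2, additional = 1 gives 4
    }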
 

	
 
    #[inline]
 
    fn current_layout(&self) -> (usize, Layout) {
 
        debug_assert!(Self::T_SIZE > 0);
 
        let old_size = self.cap * Self::T_SIZE;
 
        unsafe {
 
            return (
 
                old_size,
 
                Layout::from_size_align_unchecked(old_size, Self::T_ALIGNMENT)
 
            );
 
        }
 
    }
 
}
 

	
 
impl<T: Sized> Drop for RawVec<T> {
 
    fn drop(&mut self) {
 
        if self.cap > 0 {
 
            debug_assert!(!self.base.is_null());
 
            let (_, layout) = self.current_layout();
 
            unsafe {
 
                dealloc(self.base as *mut u8, layout);
 
-                if cfg!(debug_assertions) {
-                    self.base = ptr::null_mut();
-                }
+                dbg_code!({ self.base = ptr::null_mut(); });
 
            }
 
        }
 
    }
 
}
 
\ No newline at end of file
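A minimal usage sketch of the RawVec above (editor's illustration, not part of this changeset; the module path is assumed):

    use crate::collections::raw_vec::RawVec; // assumed module path

    fn raw_vec_demo() {
        // with_capacity pre-allocates; push grows the buffer on demand.
        let mut v: RawVec<u32> = RawVec::with_capacity(2);
        v.push(10);
        v.push(20);
        v.push(30); // triggers ensure_space: capacity becomes max(len + 1, cap * 2) = 4

        // Raw access is unsafe: the caller upholds the aliasing rules.
        unsafe {
            assert_eq!(*v.get(1), 20);
            *v.get_mut(1) = 25;
        }
        assert_eq!(v.as_slice(), &[10, 25, 30]);
        // Drop frees the allocation but runs no element destructors,
        // which is fine here because u32 is Copy.
    }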
src/protocol/eval/executor.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::*;
 
use super::store::*;
 
use super::error::*;
 
use crate::protocol::*;
 
use crate::protocol::ast::*;
 
use crate::protocol::type_table::*;
 

	
 
macro_rules! debug_enabled { () => { false }; }
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "exec", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "exec", $format, $($args),*);
 
    };
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) enum ExprInstruction {
 
    EvalExpr(ExpressionId),
 
    PushValToFront,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Frame {
 
    pub(crate) definition: DefinitionId,
 
-    pub(crate) monomorph_idx: i32,
+    pub(crate) monomorph_type_id: TypeId,
 
    pub(crate) position: StatementId,
 
    pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
    pub(crate) max_stack_size: u32,
 
}
 

	
 
impl Frame {
 
    /// Creates a new execution frame. Does not modify the stack in any way.
 
-    pub fn new(heap: &Heap, definition_id: DefinitionId, monomorph_idx: i32) -> Self {
+    pub fn new(heap: &Heap, definition_id: DefinitionId, monomorph_type_id: TypeId) -> Self {
 
        let definition = &heap[definition_id];
 
        let (outer_scope_id, first_statement_id) = match definition {
 
            Definition::Component(definition) => (definition.scope, definition.body),
 
            Definition::Function(definition) => (definition.scope, definition.body),
 
            _ => unreachable!("initializing frame with {:?} instead of a function/component", definition),
 
        };
 

	
 
        // Another not-so-pretty thing that has to be replaced somewhere in the
 
        // future...
 
        fn determine_max_stack_size(heap: &Heap, scope_id: ScopeId, max_size: &mut u32) {
 
            let scope = &heap[scope_id];
 

	
 
            // Check current block
 
            let cur_size = scope.next_unique_id_in_scope as u32;
 
            if cur_size > *max_size { *max_size = cur_size; }
 

	
 
            // And child blocks
 
            for child_scope in &scope.nested {
 
                determine_max_stack_size(heap, *child_scope, max_size);
 
            }
 
        }
 

	
 
        let mut max_stack_size = 0;
 
        determine_max_stack_size(heap, outer_scope_id, &mut max_stack_size);
 

	
 
        Frame{
 
            definition: definition_id,
 
-            monomorph_idx,
+            monomorph_type_id,
 
            position: first_statement_id.upcast(),
 
            expr_stack: VecDeque::with_capacity(128),
 
            expr_values: VecDeque::with_capacity(128),
 
            max_stack_size,
 
        }
 
    }
 

	
 
    /// Prepares a single expression for execution. This involves walking the
 
    /// expression tree and putting its nodes in the `expr_stack` such that
 
    /// continuously popping from its back will evaluate the expression. The
 
    /// results of each expression will be stored by pushing onto `expr_values`.
 
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear(); // May not be empty if last expression result(s) were discarded
 

	
 
        self.serialize_expression(heap, expr_id);
 
    }
 

	
 
    /// Prepares multiple expressions for execution (i.e. evaluating all
 
    /// function arguments or all elements of an array/union literal). Per
 
    /// expression this works the same as `prepare_single_expression`. However,
    /// after each expression is evaluated we insert a `PushValToFront`
    /// instruction.
 
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear();
 

	
 
        for expr_id in expr_ids {
 
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
            self.serialize_expression(heap, *expr_id);
 
        }
 
    }
 

	
 
    /// Performs depth-first serialization of expression tree. Let's not care
 
    /// about performance for a temporary runtime implementation
 
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
 
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));
 

	
 
        match &heap[id] {
 
            Expression::Assignment(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Binding(expr) => {
 
                self.serialize_expression(heap, expr.bound_to);
 
                self.serialize_expression(heap, expr.bound_from);
 
            },
 
            Expression::Conditional(expr) => {
 
                self.serialize_expression(heap, expr.test);
 
            },
 
            Expression::Binary(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Unary(expr) => {
 
                self.serialize_expression(heap, expr.expression);
 
            },
 
            Expression::Indexing(expr) => {
 
                self.serialize_expression(heap, expr.index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Slicing(expr) => {
 
                self.serialize_expression(heap, expr.from_index);
 
                self.serialize_expression(heap, expr.to_index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Select(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Literal(expr) => {
 
                // Here we only care about literals that have subexpressions
 
                match &expr.value {
 
                    Literal::Null | Literal::True | Literal::False |
 
                    Literal::Character(_) | Literal::String(_) |
 
                    Literal::Integer(_) | Literal::Enum(_) => {
 
                        // No subexpressions
 
                    },
 
                    Literal::Struct(literal) => {
 
                        // Note: field expressions are evaluated in programmer-
 
                        // specified order. But struct construction expects them
 
                        // in type-defined order. I might want to come back to
 
                        // this.
 
                        let mut _num_pushed = 0;
 
                        for want_field_idx in 0..literal.fields.len() {
 
                            for field in &literal.fields {
 
                                if field.field_idx == want_field_idx {
 
                                    _num_pushed += 1;
 
                                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                                    self.serialize_expression(heap, field.value);
 
                                }
 
                            }
 
                        }
 
                        debug_assert_eq!(_num_pushed, literal.fields.len())
 
                    },
 
                    Literal::Union(literal) => {
 
                        for value_expr_id in &literal.values {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Array(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Tuple(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    }
 
                }
 
            },
 
            Expression::Cast(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            }
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(_expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
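// Editor's illustrative sketch (not part of this changeset): a tiny standalone model of
// the scheme used by `serialize_expression` above. Children are pushed onto the
// instruction stack after their parent, so popping from the back evaluates them first,
// and the parent then pops its operands off the value stack.
#[cfg(test)]
mod expr_stack_model {
    use std::collections::VecDeque;

    enum Expr { Const(i64), Add(Box<Expr>, Box<Expr>) }
    enum Instr<'a> { Eval(&'a Expr) }

    fn serialize<'a>(stack: &mut VecDeque<Instr<'a>>, expr: &'a Expr) {
        stack.push_back(Instr::Eval(expr));
        if let Expr::Add(left, right) = expr {
            serialize(stack, left);
            serialize(stack, right);
        }
    }

    #[test]
    fn children_are_evaluated_before_their_parent() {
        let expr = Expr::Add(Box::new(Expr::Const(1)), Box::new(Expr::Const(2)));
        let mut stack = VecDeque::new();
        let mut values = Vec::new();
        serialize(&mut stack, &expr);
        while let Some(Instr::Eval(expr)) = stack.pop_back() {
            match expr {
                Expr::Const(value) => values.push(*value),
                Expr::Add(_, _) => {
                    // The left operand was evaluated last, so it sits on top.
                    let left = values.pop().unwrap();
                    let right = values.pop().unwrap();
                    values.push(left + right);
                }
            }
        }
        assert_eq!(values, vec![3]);
    }
}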
 

	
 
pub type EvalResult = Result<EvalContinuation, EvalError>;
 

	
 
#[derive(Debug)]
 
pub enum EvalContinuation {
 
    // Returned in both sync and non-sync modes
 
    Stepping,
 
    // Returned only in sync mode
 
    BranchInconsistent,
 
    SyncBlockEnd,
 
    NewFork,
 
    BlockFires(PortId),
 
    BlockGet(PortId),
 
    Put(PortId, ValueGroup),
 
    // Returned only in non-sync mode
 
    ComponentTerminated,
 
    SyncBlockStart,
 
-    NewComponent(DefinitionId, i32, ValueGroup),
+    NewComponent(DefinitionId, TypeId, ValueGroup),
 
    NewChannel,
 
}
 

	
 
// Note: cloning is fine, methinks. If we clone all values and the heap regions,
// then we end up with valid "pointers" to heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
-    pub fn new(_types: &TypeTable, heap: &Heap, def: DefinitionId, monomorph_idx: i32, args: ValueGroup) -> Self {
+    pub fn new(_types: &TypeTable, heap: &Heap, def: DefinitionId, type_id: TypeId, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        // Maybe do typechecking in the future?
 
-        let new_frame = Frame::new(heap, def, monomorph_idx);
+        let new_frame = Frame::new(heap, def, type_id);
 
        let max_stack_size = new_frame.max_stack_size;
 
        prompt.frames.push(new_frame);
 
        args.into_store(&mut prompt.store);
 
        prompt.store.reserve_stack(max_stack_size);
 

	
 
        prompt
 
    }
 

	
 
    /// Big ol' function right here. Didn't want to break it up unnecessarily.
 
    /// It consists of, in sequence: executing any expressions that should be
 
    /// executed before the next statement can be evaluated, then a section that
 
    /// performs debug printing, and finally a section that takes the next
 
    /// statement and executes it. If the statement requires any expressions to
 
    /// be evaluated, then they will be added such that the next time `step` is
 
    /// called, all of these expressions are indeed evaluated.
 
    pub(crate) fn step(&mut self, types: &TypeTable, heap: &Heap, modules: &[Module], ctx: &mut impl RunContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the transformation first (because Rust...)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
 

	
 
        // Helper functions to check whether an index into an array is out of bounds.
 
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx >= array_len as i64;
 
        }
 

	
 
        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx > array_len as i64;
 
        }
 

	
 
        fn construct_array_error(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, heap_pos: u32, idx: i64) -> EvalError {
 
            let array_len = prompt.store.heap_regions[heap_pos as usize].values.len();
 
            return EvalError::new_error_at_expr(
 
                prompt, modules, heap, expr_id,
 
                format!("index {} is out of bounds: array length is {}", idx, array_len)
 
            )
 
        }
 

	
 
        // Checking if we're at the end of execution
 
        let cur_frame = self.frames.last_mut().unwrap();
 
        if cur_frame.position.is_invalid() {
 
            if heap[cur_frame.definition].is_function() {
 
                todo!("End of function without return, return an evaluation error");
 
            }
 
            return Ok(EvalContinuation::ComponentTerminated);
 
        }
 

	
 
        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier().value.as_str());
 

	
 
        // Execute all pending expressions
 
        while !cur_frame.expr_stack.is_empty() {
 
            let next = cur_frame.expr_stack.pop_back().unwrap();
 
            debug_log!("Expr stack: {:?}", next);
 
            match next {
 
                ExprInstruction::PushValToFront => {
 
                    cur_frame.expr_values.rotate_right(1);
 
                },
 
                ExprInstruction::EvalExpr(expr_id) => {
 
                    let expr = &heap[expr_id];
 
                    match expr {
 
                        Expression::Assignment(expr) => {
 
                            let to = cur_frame.expr_values.pop_back().unwrap().as_ref();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            // Note: although not pretty, the assignment operator takes ownership
 
                            // of the right-hand side value when possible. So we do not drop the
 
                            // rhs's optionally owned heap data.
 
                            let rhs = self.store.read_take_ownership(rhs);
 
                            apply_assignment_operator(&mut self.store, to, expr.operation, rhs);
 
                        },
 
                        Expression::Binding(_expr) => {
 
                            let bind_to = cur_frame.expr_values.pop_back().unwrap();
 
                            let bind_from = cur_frame.expr_values.pop_back().unwrap();
 
                            let bind_to_heap_pos = bind_to.get_heap_pos();
 
                            let bind_from_heap_pos = bind_from.get_heap_pos();
 

	
 
                            let result = apply_binding_operator(&mut self.store, bind_to, bind_from);
 
                            self.store.drop_value(bind_to_heap_pos);
 
                            self.store.drop_value(bind_from_heap_pos);
 
                            cur_frame.expr_values.push_back(Value::Bool(result));
 
                        },
 
                        Expression::Conditional(expr) => {
 
                            // Evaluate testing expression, then extend the
 
                            // expression stack with the appropriate expression
 
                            let test_result = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                            if test_result {
 
                                cur_frame.serialize_expression(heap, expr.true_expression);
 
                            } else {
 
                                cur_frame.serialize_expression(heap, expr.false_expression);
 
                            }
 
                        },
 
                        Expression::Binary(expr) => {
 
                            let lhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_binary_operator(&mut self.store, &lhs, expr.operation, &rhs);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(lhs.get_heap_pos());
 
                            self.store.drop_value(rhs.get_heap_pos());
 
                        },
 
                        Expression::Unary(expr) => {
 
                            let val = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_unary_operator(&mut self.store, expr.operation, &val);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(val.get_heap_pos());
 
                        },
 
                        Expression::Indexing(_expr) => {
 
                            // Evaluate index. Never heap allocated so we do
 
                            // not have to drop it.
 
                            let index = cur_frame.expr_values.pop_back().unwrap();
 
                            let index = self.store.maybe_read_ref(&index);
 

	
 
                            debug_assert!(index.is_integer());
 
                            let index = if index.is_signed_integer() {
 
                                index.as_signed_integer() as i64
 
                            } else {
 
                                index.as_unsigned_integer() as i64
 
                            };
 

	
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            let (deallocate_heap_pos, value_to_push) = match subject {
 
                                Value::Ref(value_ref) => {
 
                                    // Our expression stack value is a reference to something that
 
                                    // exists in the normal stack/heap. We don't want to deallocate
 
                                    // this thing. Rather we want to return a reference to it.
 
                                    let subject = self.store.read_ref(value_ref);
 
                                    let subject_heap_pos = match subject {
 
                                        Value::String(v) => *v,
 
                                        Value::Array(v) => *v,
 
                                        Value::Message(v) => *v,
 
                                        _ => unreachable!(),
 
                                    };
 

	
 
                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
 
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
 
                                    }
 

	
 
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, index as u32)))
 
                                },
 
                                _ => {
 
                                    // Our value lives on the expression stack, hence we need to
 
                                    // clone whatever we're referring to. Then drop the subject.
 
                                    let subject_heap_pos = match &subject {
 
                                        Value::String(v) => *v,
 
                                        Value::Array(v) => *v,
 
                                        Value::Message(v) => *v,
 
                                        _ => unreachable!(),
 
                                    };
 

	
 
                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
 
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
 
                                    }
 

	
 
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, index as u32));
 
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
 
                                },
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value_to_push);
 
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Slicing(expr) => {
 
                            // Evaluate indices
 
                            let from_index = cur_frame.expr_values.pop_back().unwrap();
 
                            let from_index = self.store.maybe_read_ref(&from_index);
 
                            let to_index = cur_frame.expr_values.pop_back().unwrap();
 
                            let to_index = self.store.maybe_read_ref(&to_index);
 

	
 
                            debug_assert!(from_index.is_integer() && to_index.is_integer());
 
                            let from_index = if from_index.is_signed_integer() {
 
                                from_index.as_signed_integer()
 
                            } else {
 
                                from_index.as_unsigned_integer() as i64
 
                            };
 
                            let to_index = if to_index.is_signed_integer() {
 
                                to_index.as_signed_integer()
 
                            } else {
 
                                to_index.as_unsigned_integer() as i64
 
                            };
 

	
 
                            // Dereference subject if needed
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
                            let deref_subject = self.store.maybe_read_ref(&subject);
 

	
 
                            // Slicing needs to produce a copy anyway (with the
 
                            // current evaluator implementation)
 
                            enum ValueKind{ Array, String, Message }
 
                            let (value_kind, array_heap_pos) = match deref_subject {
 
                                Value::Array(v) => (ValueKind::Array, *v),
 
                                Value::String(v) => (ValueKind::String, *v),
 
                                Value::Message(v) => (ValueKind::Message, *v),
 
                                _ => unreachable!()
 
                            };
 

	
 
                            if array_inclusive_index_is_invalid(&self.store, array_heap_pos, from_index) {
 
                                return Err(construct_array_error(self, modules, heap, expr.from_index, array_heap_pos, from_index));
 
                            }
 
                            if array_exclusive_index_is_invalid(&self.store, array_heap_pos, to_index) {
 
                                return Err(construct_array_error(self, modules, heap, expr.to_index, array_heap_pos, to_index));
 
                            }
 

	
 
                            // Again: would love to push directly, but rust...
 
                            let new_heap_pos = self.store.alloc_heap();
 
                            debug_assert!(self.store.heap_regions[new_heap_pos as usize].values.is_empty());
 
                            if to_index > from_index {
 
                                let from_index = from_index as usize;
 
                                let to_index = to_index as usize;
 
                                let mut values = Vec::with_capacity(to_index - from_index);
 
                                for idx in from_index..to_index {
 
                                    let value = self.store.heap_regions[array_heap_pos as usize].values[idx].clone();
 
                                    values.push(self.store.clone_value(value));
 
                                }
 

	
 
                                self.store.heap_regions[new_heap_pos as usize].values = values;
 

	
 
                            } // else: empty range
 

	
 
                            cur_frame.expr_values.push_back(match value_kind {
 
                                ValueKind::Array => Value::Array(new_heap_pos),
 
                                ValueKind::String => Value::String(new_heap_pos),
 
                                ValueKind::Message => Value::Message(new_heap_pos),
 
                            });
 

	
 
                            // Dropping the original subject, because we don't
 
                            // want to drop something on the stack
 
                            self.store.drop_value(subject.get_heap_pos());
 
                        },
 
                        Expression::Select(expr) => {
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
-                            let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_idx);
+                            let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_type_id);
 
                            let field_idx = mono_data.expr_data[expr.unique_id_in_definition as usize].field_or_monomorph_idx as u32;
 

	
 
                            // Note: same as above: clone if value lives on expr stack, simply
 
                            // refer to it if it already lives on the stack/heap.
 
                            let (deallocate_heap_pos, value_to_push) = match subject {
 
                                Value::Ref(value_ref) => {
 
                                    let subject = self.store.read_ref(value_ref);
 
                                    let subject_heap_pos = match expr.kind {
 
                                        SelectKind::StructField(_) => subject.as_struct(),
 
                                        SelectKind::TupleMember(_) => subject.as_tuple(),
 
                                    };
 

	
 
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, field_idx)))
 
                                },
 
                                _ => {
 
                                    let subject_heap_pos = match expr.kind {
 
                                        SelectKind::StructField(_) => subject.as_struct(),
 
                                        SelectKind::TupleMember(_) => subject.as_tuple(),
 
                                    };
 
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, field_idx));
 
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
 
                                },
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value_to_push);
 
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Literal(expr) => {
 
                            let value = match &expr.value {
 
                                Literal::Null => Value::Null,
 
                                Literal::True => Value::Bool(true),
 
                                Literal::False => Value::Bool(false),
 
                                Literal::Character(lit_value) => Value::Char(*lit_value),
 
                                Literal::String(lit_value) => {
 
                                    let heap_pos = self.store.alloc_heap();
 
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
 
                                    let value = lit_value.as_str();
 
                                    debug_assert!(values.is_empty());
 
                                    values.reserve(value.len());
 
                                    for character in value.as_bytes() {
 
                                        debug_assert!(character.is_ascii());
 
                                        values.push(Value::Char(*character as char));
 
                                    }
 
                                    Value::String(heap_pos)
 
                                }
 
                                Literal::Integer(lit_value) => {
 
                                    use ConcreteTypePart as CTP;
 
-                                    let def_types = types.get_procedure_monomorph(cur_frame.monomorph_idx);
+                                    let def_types = types.get_procedure_monomorph(cur_frame.monomorph_type_id);
 
                                    let concrete_type = &def_types.expr_data[expr.unique_id_in_definition as usize].expr_type;
 

	
 
                                    debug_assert_eq!(concrete_type.parts.len(), 1);
 
                                    match concrete_type.parts[0] {
 
                                        CTP::UInt8  => Value::UInt8(lit_value.unsigned_value as u8),
 
                                        CTP::UInt16 => Value::UInt16(lit_value.unsigned_value as u16),
 
                                        CTP::UInt32 => Value::UInt32(lit_value.unsigned_value as u32),
 
                                        CTP::UInt64 => Value::UInt64(lit_value.unsigned_value as u64),
 
                                        CTP::SInt8  => Value::SInt8(lit_value.unsigned_value as i8),
 
                                        CTP::SInt16 => Value::SInt16(lit_value.unsigned_value as i16),
 
                                        CTP::SInt32 => Value::SInt32(lit_value.unsigned_value as i32),
 
                                        CTP::SInt64 => Value::SInt64(lit_value.unsigned_value as i64),
 
                                        _ => unreachable!("got concrete type {:?} for integer literal at expr {:?}", concrete_type, expr_id),
 
                                    }
 
                                }
 
                                Literal::Struct(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.fields.len()
 
                                    );
 
                                    Value::Struct(heap_pos)
 
                                }
 
                                Literal::Enum(lit_value) => {
 
                                    Value::Enum(lit_value.variant_idx as i64)
 
                                }
 
                                Literal::Union(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.values.len()
 
                                    );
 
                                    Value::Union(lit_value.variant_idx as i64, heap_pos)
 
                                }
 
                                Literal::Array(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.len()
 
                                    );
 
                                    Value::Array(heap_pos)
 
                                }
 
                                Literal::Tuple(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.len()
 
                                    );
 
                                    Value::Tuple(heap_pos)
 
                                }
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value);
 
                        },
 
                        Expression::Cast(expr) => {
 
-                            let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_idx);
+                            let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_type_id);
 
                            let output_type = &mono_data.expr_data[expr.unique_id_in_definition as usize].expr_type;
 

	
 
                            // Typechecking reduced this to two cases: either we
 
                            // have casting noop (same types), or we're casting
 
                            // between integer/bool/char types.
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
                            match apply_casting(&mut self.store, output_type, &subject) {
 
                                Ok(value) => cur_frame.expr_values.push_back(value),
 
                                Err(msg) => {
 
                                    return Err(EvalError::new_error_at_expr(self, modules, heap, expr.this.upcast(), msg));
 
                                }
 
                            }
 

	
 
                            self.store.drop_value(subject.get_heap_pos());
 
                        }
 
                        Expression::Call(expr) => {
 
                            // If we're dealing with a builtin we don't do any
 
                            // fancy shenanigans at all, just push the result.
 
                            match expr.method {
 
                                Method::Get => {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value = self.store.maybe_read_ref(&value).clone();
 

	
 
                                    let port_id = if let Value::Input(port_id) = value {
 
                                        port_id
 
                                    } else {
 
                                        unreachable!("executor calling 'get' on value {:?}", value)
 
                                    };
 

	
 
                                    match ctx.performed_get(port_id) {
 
                                        Some(result) => {
 
                                            // We have the result. Merge the `ValueGroup` with the
 
                                            // stack/heap storage.
 
                                            debug_assert_eq!(result.values.len(), 1);
 
                                            result.into_stack(&mut cur_frame.expr_values, &mut self.store);
 
                                        },
 
                                        None => {
 
                                            // Don't have the result yet, prepare the expression to
 
                                            // get run again after we've received a message.
 
                                            cur_frame.expr_values.push_front(value.clone());
 
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
 
                                            return Ok(EvalContinuation::BlockGet(port_id));
 
                                        }
 
                                    }
 
                                },
 
                                Method::Put => {
 
                                    let port_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let deref_port_value = self.store.maybe_read_ref(&port_value).clone();
 

	
 
                                    let port_id = if let Value::Output(port_id) = deref_port_value {
 
                                        port_id
 
                                    } else {
 
                                        unreachable!("executor calling 'put' on value {:?}", deref_port_value)
 
                                    };
 

	
 
                                    let msg_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let deref_msg_value = self.store.maybe_read_ref(&msg_value).clone();
 

	
 
                                    if ctx.performed_put(port_id) {
 
                                        // We're fine, deallocate in case the expression value stack
 
                                        // held an owned value
 
                                        self.store.drop_value(msg_value.get_heap_pos());
 
                                    } else {
 
                                        // Prepare to execute again
 
                                        cur_frame.expr_values.push_front(msg_value);
 
                                        cur_frame.expr_values.push_front(port_value);
 
                                        cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
 
                                        let value_group = ValueGroup::from_store(&self.store, &[deref_msg_value]);
 
                                        return Ok(EvalContinuation::Put(port_id, value_group));
 
                                    }
 
                                },
 
                                Method::Fires => {
 
                                    let port_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let port_value_deref = self.store.maybe_read_ref(&port_value).clone();
 

	
 
                                    let port_id = match port_value_deref {
 
                                        Value::Input(port_id) => port_id,
 
                                        Value::Output(port_id) => port_id,
 
                                        _ => unreachable!("executor calling 'fires' on value {:?}", port_value_deref),
 
                                    };
 

	
 
                                    match ctx.fires(port_id) {
 
                                        None => {
 
                                            cur_frame.expr_values.push_front(port_value);
 
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
 
                                            return Ok(EvalContinuation::BlockFires(port_id));
 
                                        },
 
                                        Some(value) => {
 
                                            cur_frame.expr_values.push_back(value);
 
                                        }
 
                                    }
 
                                },
 
                                Method::Create => {
 
                                    let length_value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let length_value = self.store.maybe_read_ref(&length_value);
 
                                    let length = if length_value.is_signed_integer() {
 
                                        let length_value = length_value.as_signed_integer();
 
                                        if length_value < 0 {
 
                                            return Err(EvalError::new_error_at_expr(
 
                                                self, modules, heap, expr_id,
 
                                                format!("got length '{}', can only create a message with a non-negative length", length_value)
 
                                            ));
 
                                        }
 

	
 
                                        length_value as u64
 
                                    } else {
 
                                        debug_assert!(length_value.is_unsigned_integer());
 
                                        length_value.as_unsigned_integer()
 
                                    };
 

	
 
                                    let heap_pos = self.store.alloc_heap();
 
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
 
                                    debug_assert!(values.is_empty());
 
                                    values.resize(length as usize, Value::UInt8(0));
 
                                    cur_frame.expr_values.push_back(Value::Message(heap_pos));
 
                                },
 
                                Method::Length => {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value_heap_pos = value.get_heap_pos();
 
                                    let value = self.store.maybe_read_ref(&value);
 

	
 
                                    let heap_pos = match value {
 
                                        Value::Array(pos) => *pos,
 
                                        Value::String(pos) => *pos,
 
                                        _ => unreachable!("length(...) on {:?}", value),
 
                                    };
 

	
 
                                    let len = self.store.heap_regions[heap_pos as usize].values.len();
 

	
 
                                    // TODO: @PtrInt
 
                                    cur_frame.expr_values.push_back(Value::UInt32(len as u32));
 
                                    self.store.drop_value(value_heap_pos);
 
                                },
 
                                Method::Assert => {
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value = self.store.maybe_read_ref(&value).clone();
 
                                    if !value.as_bool() {
 
                                        return Ok(EvalContinuation::BranchInconsistent)
 
                                    }
 
                                },
 
                                Method::Print => {
 
                                    // Convert the runtime-variant of a string
 
                                    // into an actual string.
 
                                    let value = cur_frame.expr_values.pop_front().unwrap();
 
                                    let value_heap_pos = value.as_string();
 
                                    let elements = &self.store.heap_regions[value_heap_pos as usize].values;
 

	
 
                                    let mut message = String::with_capacity(elements.len());
 
                                    for element in elements {
 
                                        message.push(element.as_char());
 
                                    }
 

	
 
                                    // Drop the heap-allocated value from the
 
                                    // store
 
                                    self.store.drop_heap_pos(value_heap_pos);
 
                                    println!("{}", message);
 
                                },
 
                                Method::SelectStart => {
 
                                    todo!("select start");
 
                                },
 
                                Method::SelectRegisterCasePort => {
 
                                    todo!("select register");
 
                                },
 
                                Method::SelectWait => {
 
                                    todo!("select wait");
 
                                },
 
                                Method::UserComponent => {
 
                                    // This is actually handled by the evaluation
 
                                    // of the statement.
 
                                    debug_assert_eq!(heap[expr.definition].parameters().len(), cur_frame.expr_values.len());
 
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this)
 
                                },
 
                                Method::UserFunction => {
 
                                    // Push a new frame. Note that all expressions have
 
                                    // been pushed to the front, so they're in the order
 
                                    // of the definition.
 
                                    let num_args = expr.arguments.len();
 

	
 
                                    // Determine stack boundaries
 
                                    let cur_stack_boundary = self.store.cur_stack_boundary;
 
                                    let new_stack_boundary = self.store.stack.len();
 

	
 
                                    // Push new boundary and function arguments for new frame
 
                                    self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
 
                                    for _ in 0..num_args {
 
                                        let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
 
                                        self.store.stack.push(argument);
 
                                    }
 

	
 
                                    // Determine the monomorph index of the function we're calling
 
-                                    let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_idx);
+                                    let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_type_id);
 
                                    let call_data = &mono_data.expr_data[expr.unique_id_in_definition as usize];
 

	
 
                                    // Push the new frame and reserve its stack size
 
-                                    let new_frame = Frame::new(heap, expr.definition, call_data.field_or_monomorph_idx);
+                                    let new_frame = Frame::new(heap, expr.definition, call_data.type_id);
 
                                    let new_stack_size = new_frame.max_stack_size;
 
                                    self.frames.push(new_frame);
 
                                    self.store.cur_stack_boundary = new_stack_boundary;
 
                                    self.store.reserve_stack(new_stack_size);
 

	
 
                                    // To simplify the logic a little bit we will now
 
                                    // return and ask our caller to call us again
 
                                    return Ok(EvalContinuation::Stepping);
 
                                },
 
                            }
 
                        },
 
                        Expression::Variable(expr) => {
 
                            let variable = &heap[expr.declaration.unwrap()];
 
                            let ref_value = if expr.used_as_binding_target {
 
                                Value::Binding(variable.unique_id_in_scope as StackPos)
 
                            } else {
 
                                Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos))
 
                            };
 
                            cur_frame.expr_values.push_back(ref_value);
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_log!("Frame [{:?}] at {:?}", cur_frame.definition, cur_frame.position);
 
        if debug_enabled!() {
 
            debug_log!("Expression value stack (size = {}):", cur_frame.expr_values.len());
 
            for (_stack_idx, _stack_val) in cur_frame.expr_values.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Stack (size = {}):", self.store.stack.len());
 
            for (_stack_idx, _stack_val) in self.store.stack.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
 
            }
 

	
 
            debug_log!("Heap:");
 
            for (_heap_idx, _heap_region) in self.store.heap_regions.iter().enumerate() {
 
                let _is_free = self.store.free_regions.iter().any(|idx| *idx as usize == _heap_idx);
 
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", _heap_idx, !_is_free, _heap_region.values.len(), &_heap_region.values);
 
            }
 
        }
 
        // No (more) expressions to evaluate. So evaluate the statement (which may
        // depend on the result of the last evaluated expression(s)).
 
        let stmt = &heap[cur_frame.position];
 
        let return_value = match stmt {
 
            Statement::Block(stmt) => {
 
                debug_assert!(stmt.statements.is_empty() || stmt.next == stmt.statements[0]);
 
                cur_frame.position = stmt.next;
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndBlock(stmt) => {
 
                let block = &heap[stmt.start_block];
 
                let scope = &heap[block.scope];
 
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Memory(stmt) => {
 
-                        if cfg!(debug_assertions) {
+                        dbg_code!({
                            let variable = &heap[stmt.variable];
                            debug_assert!(match self.store.read_ref(ValueId::Stack(variable.unique_id_in_scope as u32)) {
                                Value::Unassigned => false,
                                _ => true,
                            });
-                        }
+                        });
 

	
 
                        cur_frame.position = stmt.next;
 
                        Ok(EvalContinuation::Stepping)
 
                    },
 
                    LocalStatement::Channel(stmt) => {
 
                        // Need to create a new channel by requesting it from
 
                        // the runtime.
 
                        match ctx.created_channel() {
 
                            None => {
 
                                // No channel is pending. So request one
 
                                Ok(EvalContinuation::NewChannel)
 
                            },
 
                            Some((put_port, get_port)) => {
 
                                self.store.write(ValueId::Stack(heap[stmt.from].unique_id_in_scope as u32), put_port);
 
                                self.store.write(ValueId::Stack(heap[stmt.to].unique_id_in_scope as u32), get_port);
 
                                cur_frame.position = stmt.next;
 
                                Ok(EvalContinuation::Stepping)
 
                            }
 
                        }
 
                    }
 
                }
 
            },
 
            Statement::Labeled(stmt) => {
 
                cur_frame.position = stmt.body;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::If(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for if statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap();
 
                let test_value = self.store.maybe_read_ref(&test_value).as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.true_case.body;
 
                } else if let Some(false_body) = stmt.false_case {
 
                    cur_frame.position = false_body.body;
 
                } else {
 
                    // Not true, and no false body
 
                    cur_frame.position = stmt.end_if.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndIf(stmt) => {
 
                cur_frame.position = stmt.next;
 
                let if_stmt = &heap[stmt.start_if];
 
                debug_assert_eq!(
 
                    heap[if_stmt.true_case.scope].first_unique_id_in_scope,
 
                    heap[if_stmt.false_case.unwrap_or(if_stmt.true_case).scope].first_unique_id_in_scope,
 
                );
 
                let scope = &heap[if_stmt.true_case.scope];
 
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::While(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for while statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap();
 
                let test_value = self.store.maybe_read_ref(&test_value).as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.body;
 
                } else {
 
                    cur_frame.position = stmt.end_while.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndWhile(stmt) => {
 
                cur_frame.position = stmt.next;
 
                let start_while = &heap[stmt.start_while];
 
                let scope = &heap[start_while.scope];
 
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Break(stmt) => {
 
                cur_frame.position = stmt.target.upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Continue(stmt) => {
 
                cur_frame.position = stmt.target.upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Synchronous(stmt) => {
 
                cur_frame.position = stmt.body;
 

	
 
                Ok(EvalContinuation::SyncBlockStart)
 
            },
 
            Statement::EndSynchronous(stmt) => {
 
                cur_frame.position = stmt.next;
 
                let start_synchronous = &heap[stmt.start_sync];
 
                let scope = &heap[start_synchronous.scope];
 
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
 

	
 
                Ok(EvalContinuation::SyncBlockEnd)
 
            },
 
            Statement::Fork(stmt) => {
 
                if stmt.right_body.is_none() {
 
                    // No reason to fork
 
                    cur_frame.position = stmt.left_body;
 
                } else {
 
                    // Need to fork
 
                    if let Some(go_left) = ctx.performed_fork() {
 
                        // Runtime has created a fork
 
                        if go_left {
 
                            cur_frame.position = stmt.left_body;
 
                        } else {
 
                            cur_frame.position = stmt.right_body.unwrap();
 
                        }
 
                    } else {
 
                        // Request the runtime to create a fork of the current
 
                        // branch
 
                        return Ok(EvalContinuation::NewFork);
 
                    }
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndFork(stmt) => {
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Select(_stmt) => {
 
                todo!("implement select evaluation")
 
            },
 
            Statement::EndSelect(stmt) => {
 
                cur_frame.position = stmt.next;
 
                let start_select = &heap[stmt.start_select];
 
                if let Some(select_case) = start_select.cases.first() {
 
                    let scope = &heap[select_case.scope];
 
                    self.store.clear_stack(scope.first_unique_id_in_scope as usize);
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Return(_stmt) => {
 
                debug_assert!(heap[cur_frame.definition].is_function());
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for return statement");
 

	
 
                // The preceding frame has executed a call, so is expecting the
 
                // return expression on its expression value stack. Note that
 
                // we may be returning a reference to something on our stack,
 
                // so we need to read that value and clone it.
 
                let return_value = cur_frame.expr_values.pop_back().unwrap();
 
                let return_value = match return_value {
 
                    Value::Ref(value_id) => self.store.read_copy(value_id),
 
                    _ => return_value,
 
                };
 

	
 
                // Pre-emptively pop our stack frame
 
                self.frames.pop();
 

	
 
                // Clean up our section of the stack
 
                self.store.clear_stack(0);
 
                self.store.stack.truncate(self.store.cur_stack_boundary + 1);
 
                let prev_stack_idx = self.store.stack.pop().unwrap().as_stack_boundary();
 

	
 
                // TODO: Temporary hack for testing, remove at some point
 
                if self.frames.is_empty() {
 
                    debug_assert!(prev_stack_idx == -1);
 
                    debug_assert!(self.store.stack.len() == 0);
 
                    self.store.stack.push(return_value);
 
                    return Ok(EvalContinuation::ComponentTerminated);
 
                }
 

	
 
                debug_assert!(prev_stack_idx >= 0);
 
                // Return to original state of stack frame
 
                self.store.cur_stack_boundary = prev_stack_idx as usize;
 
                let cur_frame = self.frames.last_mut().unwrap();
 
                cur_frame.expr_values.push_back(return_value);
 

	
 
                // We just returned to the previous frame, which might be in
 
                // the middle of evaluating expressions for a particular
 
                // statement. So we don't want to enter the code below.
 
                return Ok(EvalContinuation::Stepping);
 
            },
 
            Statement::Goto(stmt) => {
 
                cur_frame.position = stmt.target.upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::New(stmt) => {
 
                let call_expr = &heap[stmt.expression];
 
                debug_assert!(heap[call_expr.definition].is_component());
 
                debug_assert_eq!(
 
                    cur_frame.expr_values.len(), heap[call_expr.definition].parameters().len(),
 
                    "mismatch in expr stack size and number of arguments for new statement"
 
                );
 

	
 
                let mono_data = types.get_procedure_monomorph(cur_frame.monomorph_type_id);
 
                let expr_data = &mono_data.expr_data[call_expr.unique_id_in_definition as usize];
 

	
 
                // Note that due to expression value evaluation they exist in
 
                // reverse order on the stack.
 
                // TODO: Revise this code; it is kept as-is for now to remain compatible with the current runtime
 
                let mut args = Vec::new();
 
                while let Some(value) = cur_frame.expr_values.pop_front() {
 
                    args.push(value);
 
                }
 

	
 
                // Construct argument group, thereby copying heap regions
 
                let argument_group = ValueGroup::from_store(&self.store, &args);
 
                // println!("Creating {} with\n{:#?}", heap[call_expr.definition].identifier().value.as_str(), argument_group);
 

	
 
                // Clear any heap regions
 
                for arg in &args {
 
                    self.store.drop_value(arg.get_heap_pos());
 
                }
 

	
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::NewComponent(call_expr.definition, expr_data.type_id, argument_group))
 
            },
 
            Statement::Expression(stmt) => {
 
                // The expression has just been completely evaluated. Some
 
                // values might have remained on the expression value stack.
 
                // TODO: Clear any remaining expression values properly (cur_frame.expr_values.clear()).
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
        };
 

	
 
        assert!(
 
            cur_frame.expr_values.is_empty(),
 
            "This is a debugging assertion that will fail if you perform expressions without \
 
            assigning to anything. This should be completely valid, and this assertion should be \
 
            replaced by something that clears the expression values if needed, but I'll keep this \
 
            in for now for debugging purposes."
 
        );
 

	
 
        // If the next statement requires evaluating expressions then we push
 
        // these onto the expression stack. This way we will evaluate this
 
        // stack in the next loop, then evaluate the statement using the result
 
        // from the expression evaluation.
 
        if !cur_frame.position.is_invalid() {
 
            let stmt = &heap[cur_frame.position];
 

	
 
            match stmt {
 
                Statement::Local(stmt) => {
 
                    if let LocalStatement::Memory(stmt) = stmt {
 
                        // Set up as unassigned; when we execute the memory
 
                        // statement (after evaluating its expression) it should
 
                        // no longer be `Unassigned`.
 
                        let variable = &heap[stmt.variable];
 
                        self.store.write(ValueId::Stack(variable.unique_id_in_scope as u32), Value::Unassigned);
 
                        cur_frame.prepare_single_expression(heap, stmt.initial_expr.upcast());
 
                    }
 
                },
 
                Statement::If(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::While(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::Return(stmt) => {
 
                    debug_assert_eq!(stmt.expressions.len(), 1); // TODO: @ReturnValues
 
                    cur_frame.prepare_single_expression(heap, stmt.expressions[0]);
 
                },
 
                Statement::New(stmt) => {
 
                    // Note that we will end up not evaluating the call itself.
 
                    // Rather we will evaluate its expressions and then
 
                    // instantiate the component upon reaching the "new" stmt.
 
                    let call_expr = &heap[stmt.expression];
 
                    cur_frame.prepare_multiple_expressions(heap, &call_expr.arguments);
 
                },
 
                Statement::Expression(stmt) => {
 
                    cur_frame.prepare_single_expression(heap, stmt.expression);
 
                }
 
                _ => {},
 
            }
 
        }
 

	
 
        return_value
 
    }
 

	
 
    /// Constructs an error at the current expression, i.e. the one at the top
 
    /// of the expression stack. Panics if the current frame has no more
 
    /// expressions left to evaluate.
 
    pub(crate) fn new_error_at_expr(&self, modules: &[Module], heap: &Heap, error_message: String) -> EvalError {
 
        let last_frame = self.frames.last().unwrap();
 
        for instruction in last_frame.expr_stack.iter().rev() {
 
            if let ExprInstruction::EvalExpr(expression_id) = instruction {
 
                return EvalError::new_error_at_expr(
 
                    self, modules, heap, *expression_id, error_message
 
                );
 
            }
 
        }
 

	
 
        // If we get here then the expression stack was empty (it cannot
 
        // contain only rotate instructions)
 
        panic!("attempted to construct evaluation error without any expressions to evaluate in frame");
 
    }
 
}
 
\ No newline at end of file
src/protocol/mod.rs
Show inline comments
 
mod arena;
 
pub(crate) mod eval;
 
pub(crate) mod input_source;
 
mod parser;
 
#[cfg(test)] mod tests;
 

	
 
pub(crate) mod ast;
 
pub(crate) mod ast_printer;
 

	
 
use std::sync::Mutex;
 

	
 
use crate::collections::{StringPool, StringRef};
 
use crate::protocol::ast::*;
 
use crate::protocol::eval::*;
 
use crate::protocol::input_source::*;
 
use crate::protocol::parser::*;
 
use crate::protocol::type_table::*;
 

	
 
pub use parser::type_table::TypeId;
 

	
 
/// A protocol description module
 
pub struct Module {
 
    pub(crate) source: InputSource,
 
    pub(crate) root_id: RootId,
 
    pub(crate) name: Option<StringRef<'static>>,
 
}
 
/// Description of a protocol object, used to configure new connectors.
 
#[repr(C)]
 
pub struct ProtocolDescription {
 
    pub(crate) modules: Vec<Module>,
 
    pub(crate) heap: Heap,
 
    pub(crate) types: TypeTable,
 
    pub(crate) pool: Mutex<StringPool>,
 
}
 
#[derive(Debug, Clone)]
 
pub(crate) struct ComponentState {
 
    pub(crate) prompt: Prompt,
 
}
 

	
 
#[derive(Debug)]
 
pub enum ComponentCreationError {
 
    ModuleDoesntExist,
 
    DefinitionDoesntExist,
 
    DefinitionNotComponent,
 
    InvalidNumArguments,
 
    InvalidArgumentType(usize),
 
    UnownedPort,
 
    InSync,
 
}
 

	
 
impl ProtocolDescription {
 
    pub fn parse(buffer: &[u8]) -> Result<Self, String> {
 
        let source = InputSource::new(String::new(), Vec::from(buffer));
 
        let mut parser = Parser::new();
 
        parser.feed(source).expect("failed to feed source");
 
        
 
        if let Err(err) = parser.parse() {
 
            println!("ERROR:\n{}", err);
 
            return Err(format!("{}", err))
 
        }
 

	
 
        debug_assert_eq!(parser.modules.len(), 1, "only supporting one module here for now");
 
        let modules: Vec<Module> = parser.modules.into_iter()
 
            .map(|module| Module{
 
                source: module.source,
 
                root_id: module.root_id,
 
                name: module.name.map(|(_, name)| name)
 
            })
 
            .collect();
 

	
 
        return Ok(ProtocolDescription {
 
            modules,
 
            heap: parser.heap,
 
            types: parser.type_table,
 
            pool: Mutex::new(parser.string_pool),
 
        });
 
    }
 

	
 
    pub(crate) fn new_component(
 
        &self, module_name: &[u8], identifier: &[u8], arguments: ValueGroup
 
    ) -> Result<Prompt, ComponentCreationError> {
 
        // Find the module in which the definition can be found
 
        let module_root = self.lookup_module_root(module_name);
 
        if module_root.is_none() {
 
            return Err(ComponentCreationError::ModuleDoesntExist);
 
        }
 
        let module_root = module_root.unwrap();
 

	
 
        let root = &self.heap[module_root];
 
        let definition_id = root.get_definition_ident(&self.heap, identifier);
 
        if definition_id.is_none() {
 
            return Err(ComponentCreationError::DefinitionDoesntExist);
 
        }
 
        let definition_id = definition_id.unwrap();
 

	
 
        let ast_definition = &self.heap[definition_id];
 
        if !ast_definition.is_component() {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // Make sure that the types of the provided value group match the
 
        // expected types.
 
        let ast_definition = ast_definition.as_component();
 
        if !ast_definition.poly_vars.is_empty() {
 
            return Err(ComponentCreationError::DefinitionNotComponent);
 
        }
 

	
 
        // - check number of arguments by retrieving the one instantiated
 
        //   monomorph
 
        let concrete_type = ConcreteType{ parts: vec![ConcreteTypePart::Component(definition_id, 0)] };
 
        let mono_index = self.types.get_procedure_monomorph_type_id(&definition_id, &concrete_type.parts).unwrap();
 
        let mono_type = self.types.get_procedure_monomorph(mono_index);
 
        if mono_type.arg_types.len() != arguments.values.len() {
 
            return Err(ComponentCreationError::InvalidNumArguments);
 
        }
 

	
 
        // - for each argument try to make sure the types match
 
        for arg_idx in 0..arguments.values.len() {
 
            let expected_type = &mono_type.arg_types[arg_idx];
 
            let provided_value = &arguments.values[arg_idx];
 
            if !self.verify_same_type(expected_type, 0, &arguments, provided_value) {
 
                return Err(ComponentCreationError::InvalidArgumentType(arg_idx));
 
            }
 
        }
 

	
 
        // By now we're sure that all of the arguments are correct. So create
 
        // the connector.
 
        return Ok(Prompt::new(&self.types, &self.heap, definition_id, mono_index, arguments));
 
    }
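 

	
 
    // Hedged usage sketch, not part of this changeset and not called anywhere:
 
    // it instantiates a component named `main` from the nameless module, given
 
    // an argument group the caller already constructed. The module name and the
 
    // component name are made-up examples.
 
    #[allow(dead_code)]
 
    fn example_instantiate_main(&self, arguments: ValueGroup) -> Result<Prompt, ComponentCreationError> {
 
        // An empty module name selects the module without a name, see
 
        // lookup_module_root below.
 
        return self.new_component(b"", b"main", arguments);
 
    }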
 

	
 
    fn lookup_module_root(&self, module_name: &[u8]) -> Option<RootId> {
 
        for module in self.modules.iter() {
 
            match &module.name {
 
                Some(name) => if name.as_bytes() == module_name {
 
                    return Some(module.root_id);
 
                },
 
                None => if module_name.is_empty() {
 
                    return Some(module.root_id);
 
                }
 
            }
 
        }
 

	
 
        return None;
 
    }
 

	
 
    fn verify_same_type(&self, expected: &ConcreteType, expected_idx: usize, arguments: &ValueGroup, argument: &Value) -> bool {
 
        use ConcreteTypePart as CTP;
 

	
 
        match &expected.parts[expected_idx] {
 
            CTP::Void | CTP::Message | CTP::Slice | CTP::Function(_, _) | CTP::Component(_, _) => unreachable!(),
 
            CTP::Bool => if let Value::Bool(_) = argument { true } else { false },
 
            CTP::UInt8 => if let Value::UInt8(_) = argument { true } else { false },
 
            CTP::UInt16 => if let Value::UInt16(_) = argument { true } else { false },
 
            CTP::UInt32 => if let Value::UInt32(_) = argument { true } else { false },
 
            CTP::UInt64 => if let Value::UInt64(_) = argument { true } else { false },
 
            CTP::SInt8 => if let Value::SInt8(_) = argument { true } else { false },
 
            CTP::SInt16 => if let Value::SInt16(_) = argument { true } else { false },
 
            CTP::SInt32 => if let Value::SInt32(_) = argument { true } else { false },
 
            CTP::SInt64 => if let Value::SInt64(_) = argument { true } else { false },
 
            CTP::Character => if let Value::Char(_) = argument { true } else { false },
 
            CTP::String => {
 
                // Match outer string type and embedded character types
 
                if let Value::String(heap_pos) = argument {
 
                    for element in &arguments.regions[*heap_pos as usize] {
 
                        if let Value::Char(_) = element {} else {
 
                            return false;
 
                        }
 
                    }
 
                } else {
 
                    return false;
 
                }
 

	
 
                return true;
 
            },
 
            CTP::Array => {
 
                if let Value::Array(heap_pos) = argument {
 
                    let heap_pos = *heap_pos;
 
                    for element in &arguments.regions[heap_pos as usize] {
 
                        if !self.verify_same_type(expected, expected_idx + 1, arguments, element) {
 
                            return false;
 
                        }
 
                    }
 
                    return true;
 
                } else {
 
                    return false;
 
                }
 
            },
 
            CTP::Input => if let Value::Input(_) = argument { true } else { false },
 
            CTP::Output => if let Value::Output(_) = argument { true } else { false },
 
            CTP::Tuple(_) => todo!("implement full type checking on user-supplied arguments"),
 
            CTP::Instance(definition_id, _num_embedded) => {
 
                let definition = self.types.get_base_definition(definition_id).unwrap();
 
                match &definition.definition {
 
                    DefinedTypeVariant::Enum(definition) => {
 
                        if let Value::Enum(variant_value) = argument {
 
                            let is_valid = definition.variants.iter()
 
                                .any(|v| v.value == *variant_value);
 
                            return is_valid;
 
                        }
 
                    },
 
                    _ => todo!("implement full type checking on user-supplied arguments"),
 
                }
 

	
 
                return false;
 
            },
 
        }
 
    }
 
}
 

	
 
pub trait RunContext {
 
    fn performed_put(&mut self, port: PortId) -> bool;
 
    fn performed_get(&mut self, port: PortId) -> Option<ValueGroup>; // None if still waiting on message
 
    fn fires(&mut self, port: PortId) -> Option<Value>; // None if not yet branched
 
    fn performed_fork(&mut self) -> Option<bool>; // None if not yet forked
 
    fn created_channel(&mut self) -> Option<(Value, Value)>; // None if not yet prepared
 
}
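 

	
 
// Hedged sketch, illustrative only and not used anywhere in this changeset: a
 
// RunContext in which no runtime action is ever pending. An evaluator driven
 
// with it keeps yielding the corresponding continuations (e.g. NewChannel,
 
// NewFork) instead of progressing past the statements that need the runtime.
 
#[allow(dead_code)]
 
struct NoPendingRunContext;
 

	
 
impl RunContext for NoPendingRunContext {
 
    fn performed_put(&mut self, _port: PortId) -> bool { false }
 
    fn performed_get(&mut self, _port: PortId) -> Option<ValueGroup> { None }
 
    fn fires(&mut self, _port: PortId) -> Option<Value> { None }
 
    fn performed_fork(&mut self) -> Option<bool> { None }
 
    fn created_channel(&mut self) -> Option<(Value, Value)> { None }
 
}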
 

	
 
pub struct ProtocolDescriptionBuilder {
 
    parser: Parser,
 
}
 

	
 
impl ProtocolDescriptionBuilder {
 
    pub fn new() -> Self {
 
        return Self{
 
            parser: Parser::new(),
 
        }
 
    }
 

	
 
    pub fn add(&mut self, filename: String, buffer: Vec<u8>) -> Result<(), ParseError> {
 
        let input = InputSource::new(filename, buffer);
 
        self.parser.feed(input)?;
 

	
 
        return Ok(())
 
    }
 

	
 
    pub fn compile(mut self) -> Result<ProtocolDescription, ParseError> {
 
        self.parser.parse()?;
 

	
 
        let modules: Vec<Module> = self.parser.modules.into_iter()
 
            .map(|module| Module{
 
                source: module.source,
 
                root_id: module.root_id,
 
                name: module.name.map(|(_, name)| name)
 
            })
 
            .collect();
 

	
 
        return Ok(ProtocolDescription {
 
            modules,
 
            heap: self.parser.heap,
 
            types: self.parser.type_table,
 
            pool: Mutex::new(self.parser.string_pool),
 
        });
 
    }
 
}
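 

	
 
// Minimal usage sketch of the builder above, assuming the embedding application
 
// supplies a file name and a source buffer; illustrative only and not called
 
// anywhere in this changeset.
 
#[allow(dead_code)]
 
fn example_build_description(filename: String, source: Vec<u8>) -> Result<ProtocolDescription, ParseError> {
 
    let mut builder = ProtocolDescriptionBuilder::new();
 
    builder.add(filename, source)?;
 
    return builder.compile();
 
}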
src/protocol/parser/pass_definitions.rs
Show inline comments
 
@@ -97,769 +97,768 @@ impl PassDefinitions {
 
            // Token was not None, so peek_ident returns None if not an ident
 
            let ident = peek_ident(&module.source, &mut iter);
 
            match ident {
 
                Some(KW_STRUCT) => self.visit_struct_definition(module, &mut iter, ctx)?,
 
                Some(KW_ENUM) => self.visit_enum_definition(module, &mut iter, ctx)?,
 
                Some(KW_UNION) => self.visit_union_definition(module, &mut iter, ctx)?,
 
                Some(KW_FUNCTION) => self.visit_function_definition(module, &mut iter, ctx)?,
 
                Some(KW_PRIMITIVE) | Some(KW_COMPOSITE) => self.visit_component_definition(module, &mut iter, ctx)?,
 
                _ => return Err(ParseError::new_error_str_at_pos(
 
                    &module.source, iter.last_valid_pos(),
 
                    "unexpected symbol, expected a keyword marking the start of a definition"
 
                )),
 
            }
 
        }
 
    }
 

	
 
    fn visit_struct_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        consume_exact_ident(&module.source, iter, KW_STRUCT)?;
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated DefinitionId
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        // Parse struct definition
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        let mut fields_section = self.struct_fields.start_section();
 
        consume_comma_separated(
 
            TokenKind::OpenCurly, TokenKind::CloseCurly, &module.source, iter, ctx,
 
            |source, iter, ctx| {
 
                let poly_vars = ctx.heap[definition_id].poly_vars();
 

	
 
                let start_pos = iter.last_valid_pos();
 
                let parser_type = self.type_parser.consume_parser_type(
 
                    iter, &ctx.heap, source, &ctx.symbols, poly_vars, definition_id,
 
                    module_scope, false, None
 
                )?;
 
                let field = consume_ident_interned(source, iter, ctx)?;
 
                Ok(StructFieldDefinition{
 
                    span: InputSpan::from_positions(start_pos, field.span.end),
 
                    field, parser_type
 
                })
 
            },
 
            &mut fields_section, "a struct field", "a list of struct fields", None
 
        )?;
 

	
 
        // Transfer to preallocated definition
 
        let struct_def = ctx.heap[definition_id].as_struct_mut();
 
        struct_def.fields = fields_section.into_vec();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_enum_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        consume_exact_ident(&module.source, iter, KW_ENUM)?;
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated DefinitionId
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        // Parse enum definition
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        let mut enum_section = self.enum_variants.start_section();
 
        consume_comma_separated(
 
            TokenKind::OpenCurly, TokenKind::CloseCurly, &module.source, iter, ctx,
 
            |source, iter, ctx| {
 
                let identifier = consume_ident_interned(source, iter, ctx)?;
 
                let value = if iter.next() == Some(TokenKind::Equal) {
 
                    iter.consume();
 
                    let (variant_number, _) = consume_integer_literal(source, iter, &mut self.buffer)?;
 
                    EnumVariantValue::Integer(variant_number as i64) // TODO: @int
 
                } else {
 
                    EnumVariantValue::None
 
                };
 
                Ok(EnumVariantDefinition{ identifier, value })
 
            },
 
            &mut enum_section, "an enum variant", "a list of enum variants", None
 
        )?;
 

	
 
        // Transfer to definition
 
        let enum_def = ctx.heap[definition_id].as_enum_mut();
 
        enum_def.variants = enum_section.into_vec();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_union_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        consume_exact_ident(&module.source, iter, KW_UNION)?;
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated DefinitionId
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        // Parse union definition
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        let mut variants_section = self.union_variants.start_section();
 
        consume_comma_separated(
 
            TokenKind::OpenCurly, TokenKind::CloseCurly, &module.source, iter, ctx,
 
            |source, iter, ctx| {
 
                let identifier = consume_ident_interned(source, iter, ctx)?;
 
                let mut close_pos = identifier.span.end;
 

	
 
                let mut types_section = self.parser_types.start_section();
 

	
 
                let has_embedded = maybe_consume_comma_separated(
 
                    TokenKind::OpenParen, TokenKind::CloseParen, source, iter, ctx,
 
                    |source, iter, ctx| {
 
                        let poly_vars = ctx.heap[definition_id].poly_vars();
 
                        self.type_parser.consume_parser_type(
 
                            iter, &ctx.heap, source, &ctx.symbols, poly_vars, definition_id,
 
                            module_scope, false, None
 
                        )
 
                    },
 
                    &mut types_section, "an embedded type", Some(&mut close_pos)
 
                )?;
 
                let value = if has_embedded {
 
                    types_section.into_vec()
 
                } else {
 
                    types_section.forget();
 
                    Vec::new()
 
                };
 

	
 
                Ok(UnionVariantDefinition{
 
                    span: InputSpan::from_positions(identifier.span.begin, close_pos),
 
                    identifier,
 
                    value
 
                })
 
            },
 
            &mut variants_section, "a union variant", "a list of union variants", None
 
        )?;
 

	
 
        // Transfer to AST
 
        let union_def = ctx.heap[definition_id].as_union_mut();
 
        union_def.variants = variants_section.into_vec();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_function_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        // Retrieve function name
 
        consume_exact_ident(&module.source, iter, KW_FUNCTION)?;
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated DefinitionId
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        // Parse function's argument list
 
        let mut parameter_section = self.variables.start_section();
 
        consume_parameter_list(
 
            &mut self.type_parser, &module.source, iter, ctx, &mut parameter_section, module_scope, definition_id
 
        )?;
 
        let parameters = parameter_section.into_vec();
 

	
 
        // Consume return types
 
        consume_token(&module.source, iter, TokenKind::ArrowRight)?;
 
        let poly_vars = ctx.heap[definition_id].poly_vars();
 
        let parser_type = self.type_parser.consume_parser_type(
 
            iter, &ctx.heap, &module.source, &ctx.symbols, poly_vars, definition_id,
 
            module_scope, false, None
 
        )?;
 

	
 
        // Consume block and the definition's scope
 
        let body_id = self.consume_block_statement(module, iter, ctx)?;
 
        let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::Definition(definition_id)));
 

	
 
        // Assign everything in the preallocated AST node
 
        let function = ctx.heap[definition_id].as_function_mut();
 
        function.return_type = parser_type;
 
        function.parameters = parameters;
 
        function.scope = scope_id;
 
        function.body = body_id;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_component_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        // Consume component variant and name
 
        let (_variant_text, _) = consume_any_ident(&module.source, iter)?;
 
        debug_assert!(_variant_text == KW_PRIMITIVE || _variant_text == KW_COMPOSITE);
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated definition
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        // Parse component's argument list
 
        let mut parameter_section = self.variables.start_section();
 
        consume_parameter_list(
 
            &mut self.type_parser, &module.source, iter, ctx, &mut parameter_section, module_scope, definition_id
 
        )?;
 
        let parameters = parameter_section.into_vec();
 

	
 
        // Consume block
 
        let body_id = self.consume_block_statement(module, iter, ctx)?;
 
        let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::Definition(definition_id)));
 

	
 
        // Assign everything in the AST node
 
        let component = ctx.heap[definition_id].as_component_mut();
 
        component.parameters = parameters;
 
        component.scope = scope_id;
 
        component.body = body_id;
 

	
 
        Ok(())
 
    }
 

	
 
    /// Consumes a statement and returns the ID of the statement that was
 
    /// consumed.
 
    fn consume_statement(&mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx) -> Result<StatementId, ParseError> {
 
        let next = iter.next().expect("consume_statement has a next token");
 

	
 
        if next == TokenKind::OpenCurly {
 
            let id = self.consume_block_statement(module, iter, ctx)?;
 
            return Ok(id.upcast());
 
        } else if next == TokenKind::Ident {
 
            let ident = peek_ident(&module.source, iter).unwrap();
 
            if ident == KW_STMT_IF {
 
                // Consume if statement and place end-if statement directly
 
                // after it.
 
                let id = self.consume_if_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_WHILE {
 
                let id = self.consume_while_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_BREAK {
 
                let id = self.consume_break_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_CONTINUE {
 
                let id = self.consume_continue_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_SYNC {
 
                let id = self.consume_synchronous_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_FORK {
 
                let id = self.consume_fork_statement(module, iter, ctx)?;
 

	
 
                let end_fork = ctx.heap.alloc_end_fork_statement(|this| EndForkStatement {
 
                    this,
 
                    start_fork: id,
 
                    next: StatementId::new_invalid(),
 
                });
 

	
 
                let fork_stmt = &mut ctx.heap[id];
 
                fork_stmt.end_fork = end_fork;
 

	
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_SELECT {
 
                let id = self.consume_select_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_RETURN {
 
                let id = self.consume_return_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_GOTO {
 
                let id = self.consume_goto_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_NEW {
 
                let id = self.consume_new_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else if ident == KW_STMT_CHANNEL {
 
                let id = self.consume_channel_statement(module, iter, ctx)?;
 
                return Ok(id.upcast().upcast());
 
            } else if iter.peek() == Some(TokenKind::Colon) {
 
                let id = self.consume_labeled_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            } else {
 
                // Two fallback possibilities: the first one is a memory
 
                // declaration, the other one is to parse it as a normal
 
                // expression. This is a bit ugly.
 
                if let Some(memory_stmt_id) = self.maybe_consume_memory_statement_without_semicolon(module, iter, ctx)? {
 
                    consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
                    return Ok(memory_stmt_id.upcast().upcast());
 
                } else {
 
                    let id = self.consume_expression_statement(module, iter, ctx)?;
 
                    return Ok(id.upcast());
 
                }
 
            }
 
        } else if next == TokenKind::OpenParen {
 
            // Same as above: memory statement or normal expression
 
            if let Some(memory_stmt_id) = self.maybe_consume_memory_statement_without_semicolon(module, iter, ctx)? {
 
                consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
                return Ok(memory_stmt_id.upcast().upcast());
 
            } else {
 
                let id = self.consume_expression_statement(module, iter, ctx)?;
 
                return Ok(id.upcast());
 
            }
 
        } else {
 
            let id = self.consume_expression_statement(module, iter, ctx)?;
 
            return Ok(id.upcast());
 
        }
 
    }
 

	
 
    fn consume_block_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<BlockStatementId, ParseError> {
 
        let open_curly_span = consume_token(&module.source, iter, TokenKind::OpenCurly)?;
 

	
 
        let mut stmt_section = self.statements.start_section();
 
        let mut next = iter.next();
 
        while next != Some(TokenKind::CloseCurly) {
 
            if next.is_none() {
 
                return Err(ParseError::new_error_str_at_pos(
 
                    &module.source, iter.last_valid_pos(), "expected a statement or '}'"
 
                ));
 
            }
 
            let stmt_id = self.consume_statement(module, iter, ctx)?;
 
            stmt_section.push(stmt_id);
 
            next = iter.next();
 
        }
 

	
 
        let statements = stmt_section.into_vec();
 
        let mut block_span = consume_token(&module.source, iter, TokenKind::CloseCurly)?;
 
        block_span.begin = open_curly_span.begin;
 

	
 
        let block_id = ctx.heap.alloc_block_statement(|this| BlockStatement{
 
            this,
 
            span: block_span,
 
            statements,
 
            end_block: EndBlockStatementId::new_invalid(),
 
            scope: ScopeId::new_invalid(),
 
            next: StatementId::new_invalid(),
 
        });
 
        let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::Block(block_id)));
 

	
 
        let end_block_id = ctx.heap.alloc_end_block_statement(|this| EndBlockStatement{
 
            this, start_block: block_id, next: StatementId::new_invalid()
 
        });
 

	
 
        let block_stmt = &mut ctx.heap[block_id];
 
        block_stmt.end_block = end_block_id;
 
        block_stmt.scope = scope_id;
 

	
 
        Ok(block_id)
 
    }
 

	
 
    fn consume_if_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<IfStatementId, ParseError> {
 
        let if_span = consume_exact_ident(&module.source, iter, KW_STMT_IF)?;
 
        consume_token(&module.source, iter, TokenKind::OpenParen)?;
 
        let test = self.consume_expression(module, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::CloseParen)?;
 

	
 
        // Consume bodies of if-statement
 
        let true_body = IfStatementCase{
 
            body: self.consume_statement(module, iter, ctx)?,
 
            scope: ScopeId::new_invalid(),
 
        };
 

	
 
        let false_body = if has_ident(&module.source, iter, KW_STMT_ELSE) {
 
            iter.consume();
 
            let false_body = IfStatementCase{
 
                body: self.consume_statement(module, iter, ctx)?,
 
                scope: ScopeId::new_invalid(),
 
            };
 

	
 
            Some(false_body)
 
        } else {
 
            None
 
        };
 

	
 
        // Construct AST elements
 
        let if_stmt_id = ctx.heap.alloc_if_statement(|this| IfStatement{
 
            this,
 
            span: if_span,
 
            test,
 
            true_case: true_body,
 
            false_case: false_body,
 
            end_if: EndIfStatementId::new_invalid(),
 
        });
 
        let end_if_stmt_id = ctx.heap.alloc_end_if_statement(|this| EndIfStatement{
 
            this,
 
            start_if: if_stmt_id,
 
            next: StatementId::new_invalid(),
 
        });
 
        let true_scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::If(if_stmt_id, true)));
 
        let false_scope_id = if false_body.is_some() {
 
            Some(ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::If(if_stmt_id, false))))
 
        } else {
 
            None
 
        };
 

	
 
        let if_stmt = &mut ctx.heap[if_stmt_id];
 
        if_stmt.end_if = end_if_stmt_id;
 
        if_stmt.true_case.scope = true_scope_id;
 
        if let Some(false_case) = &mut if_stmt.false_case {
 
            false_case.scope = false_scope_id.unwrap();
 
        }
 

	
 
        return Ok(if_stmt_id);
 
    }
 

	
 
    fn consume_while_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<WhileStatementId, ParseError> {
 
        let while_span = consume_exact_ident(&module.source, iter, KW_STMT_WHILE)?;
 
        consume_token(&module.source, iter, TokenKind::OpenParen)?;
 
        let test = self.consume_expression(module, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::CloseParen)?;
 
        let body = self.consume_statement(module, iter, ctx)?;
 

	
 
        let while_stmt_id = ctx.heap.alloc_while_statement(|this| WhileStatement{
 
            this,
 
            span: while_span,
 
            test,
 
            scope: ScopeId::new_invalid(),
 
            body,
 
            end_while: EndWhileStatementId::new_invalid(),
 
            in_sync: SynchronousStatementId::new_invalid(),
 
        });
 
        let end_while_stmt_id = ctx.heap.alloc_end_while_statement(|this| EndWhileStatement{
 
            this,
 
            start_while: while_stmt_id,
 
            next: StatementId::new_invalid(),
 
        });
 
        let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::While(while_stmt_id)));
 

	
 
        let while_stmt = &mut ctx.heap[while_stmt_id];
 
        while_stmt.scope = scope_id;
 
        while_stmt.end_while = end_while_stmt_id;
 

	
 
        Ok(while_stmt_id)
 
    }
 

	
 
    fn consume_break_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<BreakStatementId, ParseError> {
 
        let break_span = consume_exact_ident(&module.source, iter, KW_STMT_BREAK)?;
 
        let label = if Some(TokenKind::Ident) == iter.next() {
 
            let label = consume_ident_interned(&module.source, iter, ctx)?;
 
            Some(label)
 
        } else {
 
            None
 
        };
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
        Ok(ctx.heap.alloc_break_statement(|this| BreakStatement{
 
            this,
 
            span: break_span,
 
            label,
 
            target: EndWhileStatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_continue_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ContinueStatementId, ParseError> {
 
        let continue_span = consume_exact_ident(&module.source, iter, KW_STMT_CONTINUE)?;
 
        let label = if Some(TokenKind::Ident) == iter.next() {
 
            let label = consume_ident_interned(&module.source, iter, ctx)?;
 
            Some(label)
 
        } else {
 
            None
 
        };
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
        Ok(ctx.heap.alloc_continue_statement(|this| ContinueStatement{
 
            this,
 
            span: continue_span,
 
            label,
 
            target: WhileStatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_synchronous_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<SynchronousStatementId, ParseError> {
 
        let synchronous_span = consume_exact_ident(&module.source, iter, KW_STMT_SYNC)?;
 
        let body = self.consume_statement(module, iter, ctx)?;
 

	
 
        let sync_stmt_id = ctx.heap.alloc_synchronous_statement(|this| SynchronousStatement{
 
            this,
 
            span: synchronous_span,
 
            scope: ScopeId::new_invalid(),
 
            body,
 
            end_sync: EndSynchronousStatementId::new_invalid(),
 
        });
 
        let end_sync_stmt_id = ctx.heap.alloc_end_synchronous_statement(|this| EndSynchronousStatement{
 
            this,
 
            start_sync: sync_stmt_id,
 
            next: StatementId::new_invalid(),
 
        });
 
        let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::Synchronous(sync_stmt_id)));
 

	
 
        let sync_stmt = &mut ctx.heap[sync_stmt_id];
 
        sync_stmt.scope = scope_id;
 
        sync_stmt.end_sync = end_sync_stmt_id;
 

	
 
        return Ok(sync_stmt_id);
 
    }
 

	
 
    fn consume_fork_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ForkStatementId, ParseError> {
 
        let fork_span = consume_exact_ident(&module.source, iter, KW_STMT_FORK)?;
 
        let left_body = self.consume_statement(module, iter, ctx)?;
 

	
 
        let right_body = if has_ident(&module.source, iter, KW_STMT_OR) {
 
            iter.consume();
 
            let right_body = self.consume_statement(module, iter, ctx)?;
 
            Some(right_body)
 
        } else {
 
            None
 
        };
 

	
 
        Ok(ctx.heap.alloc_fork_statement(|this| ForkStatement{
 
            this,
 
            span: fork_span,
 
            left_body,
 
            right_body,
 
            end_fork: EndForkStatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_select_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<SelectStatementId, ParseError> {
 
        let select_span = consume_exact_ident(&module.source, iter, KW_STMT_SELECT)?;
 
        consume_token(&module.source, iter, TokenKind::OpenCurly)?;
 

	
 
        let mut cases = Vec::new();
 
        let mut next = iter.next();
 

	
 
        while Some(TokenKind::CloseCurly) != next {
 
            let guard = match self.maybe_consume_memory_statement_without_semicolon(module, iter, ctx)? {
 
                Some(guard_mem_stmt) => guard_mem_stmt.upcast().upcast(),
 
                None => {
 
                    let start_pos = iter.last_valid_pos();
 
                    let expr = self.consume_expression(module, iter, ctx)?;
 
                    let end_pos = iter.last_valid_pos();
 

	
 
                    let guard_expr_stmt = ctx.heap.alloc_expression_statement(|this| ExpressionStatement{
 
                        this,
 
                        span: InputSpan::from_positions(start_pos, end_pos),
 
                        expression: expr,
 
                        next: StatementId::new_invalid(),
 
                    });
 

	
 
                    guard_expr_stmt.upcast()
 
                },
 
            };
 
            consume_token(&module.source, iter, TokenKind::ArrowRight)?;
 
            let block = self.consume_statement(module, iter, ctx)?;
 
            cases.push(SelectCase{
 
                guard,
 
                body: block,
 
                scope: ScopeId::new_invalid(),
 
                involved_ports: Vec::with_capacity(1)
 
            });
 

	
 
            next = iter.next();
 
        }
 

	
 
        consume_token(&module.source, iter, TokenKind::CloseCurly)?;
 

	
 
        let num_cases = cases.len();
 
        let select_stmt_id = ctx.heap.alloc_select_statement(|this| SelectStatement{
 
            this,
 
            span: select_span,
 
            cases,
 
            end_select: EndSelectStatementId::new_invalid(),
 
            relative_pos_in_parent: -1,
 
            next: StatementId::new_invalid(),
 
        });
 

	
 
        let end_select_stmt_id = ctx.heap.alloc_end_select_statement(|this| EndSelectStatement{
 
            this,
 
            start_select: select_stmt_id,
 
            next: StatementId::new_invalid(),
 
        });
 

	
 
        let select_stmt = &mut ctx.heap[select_stmt_id];
 
        select_stmt.end_select = end_select_stmt_id;
 

	
 
        for case_index in 0..num_cases {
 
            let scope_id = ctx.heap.alloc_scope(|this| Scope::new(this, ScopeAssociation::SelectCase(select_stmt_id, case_index as u32)));
 
            let select_stmt = &mut ctx.heap[select_stmt_id];
 
            let select_case = &mut select_stmt.cases[case_index];
 
            select_case.scope = scope_id;
 
        }
 

	
 
        return Ok(select_stmt_id)
 
    }
 

	
 
    fn consume_return_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ReturnStatementId, ParseError> {
 
        let return_span = consume_exact_ident(&module.source, iter, KW_STMT_RETURN)?;
 
        let mut scoped_section = self.expressions.start_section();
 

	
 
        consume_comma_separated_until(
 
            TokenKind::SemiColon, &module.source, iter, ctx,
 
            |_source, iter, ctx| self.consume_expression(module, iter, ctx),
 
            &mut scoped_section, "an expression", None
 
        )?;
 
        let expressions = scoped_section.into_vec();
 

	
 
        if expressions.is_empty() {
 
            return Err(ParseError::new_error_str_at_span(&module.source, return_span, "expected at least one return value"));
 
        } else if expressions.len() > 1 {
 
            return Err(ParseError::new_error_str_at_span(&module.source, return_span, "multiple return values are not (yet) supported"))
 
        }
 

	
 
        Ok(ctx.heap.alloc_return_statement(|this| ReturnStatement{
 
            this,
 
            span: return_span,
 
            expressions
 
        }))
 
    }
 

	
 
    fn consume_goto_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<GotoStatementId, ParseError> {
 
        let goto_span = consume_exact_ident(&module.source, iter, KW_STMT_GOTO)?;
 
        let label = consume_ident_interned(&module.source, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
        Ok(ctx.heap.alloc_goto_statement(|this| GotoStatement{
 
            this,
 
            span: goto_span,
 
            label,
 
            target: LabeledStatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_new_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<NewStatementId, ParseError> {
 
        let new_span = consume_exact_ident(&module.source, iter, KW_STMT_NEW)?;
 

	
 
        let start_pos = iter.last_valid_pos();
 
        let expression_id = self.consume_primary_expression(module, iter, ctx)?;
 
        let expression = &ctx.heap[expression_id];
 
        let mut valid = false;
 

	
 
        let mut call_id = CallExpressionId::new_invalid();
 
        if let Expression::Call(expression) = expression {
 
            // Allow both components and functions, as it makes more sense to
 
            // check their correct use in the validation and linking pass
 
            if expression.method == Method::UserComponent || expression.method == Method::UserFunction {
 
                call_id = expression.this;
 
                valid = true;
 
            }
 
        }
 

	
 
        if !valid {
 
            return Err(ParseError::new_error_str_at_span(
 
                &module.source, InputSpan::from_positions(start_pos, iter.last_valid_pos()), "expected a call expression"
 
            ));
 
        }
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 

	
 
        debug_assert!(!call_id.is_invalid());
 
        Ok(ctx.heap.alloc_new_statement(|this| NewStatement{
 
            this,
 
            span: new_span,
 
            expression: call_id,
 
            next: StatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_channel_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ChannelStatementId, ParseError> {
 
        // Consume channel specification
 
        let channel_span = consume_exact_ident(&module.source, iter, KW_STMT_CHANNEL)?;
 
        let (inner_port_type, end_pos) = if Some(TokenKind::OpenAngle) == iter.next() {
 
            // Retrieve the type of the channel, we're cheating a bit here by
 
            // consuming the first '<' and setting the initial angle depth to 1
 
            // such that our final '>' will be consumed as well.
 
            let angle_start_pos = iter.next_start_position();
 
            iter.consume();
 
            let definition_id = self.cur_definition;
 
            let poly_vars = ctx.heap[definition_id].poly_vars();
 
            let parser_type = self.type_parser.consume_parser_type(
 
                iter, &ctx.heap, &module.source, &ctx.symbols, poly_vars,
 
                definition_id, SymbolScope::Module(module.root_id),
 
                true, Some(angle_start_pos)
 
            )?;
 

	
 
            (parser_type.elements, parser_type.full_span.end)
 
        } else {
 
            // Assume inferred
 
            (
 
                vec![ParserTypeElement{
 
                    element_span: channel_span,
 
                    variant: ParserTypeVariant::Inferred
 
                }],
 
                channel_span.end
 
            )
 
        };
 

	
 
        let from_identifier = consume_ident_interned(&module.source, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::ArrowRight)?;
 
        let to_identifier = consume_ident_interned(&module.source, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 

	
 
        // Construct ports
 
        let port_type_span = InputSpan::from_positions(channel_span.begin, end_pos);
 
        let port_type_len = inner_port_type.len() + 1;
 
        let mut from_port_type = ParserType{ elements: Vec::with_capacity(port_type_len), full_span: port_type_span };
 
        from_port_type.elements.push(ParserTypeElement{
 
            element_span: channel_span,
 
            variant: ParserTypeVariant::Output,
 
        });
 
        from_port_type.elements.extend_from_slice(&inner_port_type);
 
        let from = ctx.heap.alloc_variable(|this| Variable{
 
            this,
 
            kind: VariableKind::Local,
 
            identifier: from_identifier,
 
            parser_type: from_port_type,
 
            relative_pos_in_parent: 0,
 
            unique_id_in_scope: -1,
 
        });
 

	
 
        let mut to_port_type = ParserType{ elements: Vec::with_capacity(port_type_len), full_span: port_type_span };
 
        to_port_type.elements.push(ParserTypeElement{
 
            element_span: channel_span,
 
            variant: ParserTypeVariant::Input
 
        });
 
        to_port_type.elements.extend_from_slice(&inner_port_type);
 
        let to = ctx.heap.alloc_variable(|this|Variable{
 
            this,
 
            kind: VariableKind::Local,
 
            identifier: to_identifier,
 
            parser_type: to_port_type,
 
            relative_pos_in_parent: 0,
 
            unique_id_in_scope: -1,
 
        });
 

	
 
        // Construct the channel
 
        Ok(ctx.heap.alloc_channel_statement(|this| ChannelStatement{
 
            this,
 
            span: channel_span,
 
            from, to,
 
            relative_pos_in_parent: 0,
 
            next: StatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_labeled_statement(&mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx) -> Result<LabeledStatementId, ParseError> {
 
        let label = consume_ident_interned(&module.source, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::Colon)?;
src/protocol/parser/pass_typing.rs
Show inline comments
 
/// pass_typing
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure and insert and determine its initial type based on context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter case we return an
 
/// error.
 
///
 
/// TODO: Needs a thorough rewrite:
 
///  0. polymorph_progress is intentionally broken at the moment. Make it work
 
///     again and use a normal VecSomething.
 
///  1. The foundation for doing all of the work with predetermined indices
 
///     instead of with HashMaps is there, but it is not really used because of
 
///     time constraints. When time is available, rewrite the system such that
 
///     AST IDs are not needed, and only indices into arrays are used.
 
///  2. We're doing a lot of extra work. It seems better to apply the initial
 
///     type based on expression parents, and immediately apply forced
 
///     constraints (arg to a fires() call must be port-like). All of the
 
///     progress_xxx calls should then only be concerned with "transmitting"
 
///     type inference across their parent/child expressions.
 
///  3. Remove the `msg` type?
 
///  4. Disallow certain types in certain operations (e.g. `Void`).
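// A minimal, illustrative sketch (not actual pass code; `queue`,
// `progress_expr` and `related_exprs` are placeholder names) of the fixpoint
// loop described above:
//
//     while let Some(expr_idx) = queue.pop_front() {
//         let made_progress = progress_expr(expr_idx);
//         if made_progress {
//             for related_idx in related_exprs(expr_idx) {
//                 queue.push_back(related_idx); // re-examine parents/children
//             }
//         }
//     }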
 

	
 
macro_rules! debug_log_enabled {
 
    () => { false };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "types", $format, $($args),*);
 
    };
 
}
 

	
 
use std::collections::{HashMap, HashSet};
 

	
 
use crate::collections::{ScopedBuffer, ScopedSection, DequeSet};
 
use crate::protocol::ast::*;
 
use crate::protocol::input_source::ParseError;
 
use crate::protocol::parser::ModuleCompilationPhase;
 
use crate::protocol::parser::type_table::*;
 
use crate::protocol::parser::token_parsing::*;
 
use super::visitor::{
 
    BUFFER_INIT_CAP_LARGE,
 
    BUFFER_INIT_CAP_SMALL,
 
    Ctx,
 
    Visitor,
 
    VisitorResult
 
};
 

	
 
const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 
const STRING_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::String, InferenceTypePart::Character ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const SLICE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Slice, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // When we infer types of AST elements that support polymorphic arguments,
 
    // then we might have the case that multiple embedded types depend on the
 
    // polymorphic type (e.g. func bla(T a, T[] b) -> T[][]). If we can infer
 
    // the type in one place (e.g. argument a), then we may propagate this
 
    // information to other types (e.g. argument b and the return type). For
 
    // this reason we place markers in the `InferenceType` instances such that
 
    // we know which part of the type was originally a polymorphic argument.
 
    Marker(u32),
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    UInt8,
 
    UInt16,
 
    UInt32,
 
    UInt64,
 
    SInt8,
 
    SInt16,
 
    SInt32,
 
    SInt64,
 
    Character,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // Tuple with any number of subtypes (for practical reasons 1 element is impossible)
 
    Tuple(u32),
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, u32)
 
}
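// A small illustrative example of the depth-first encoding used by
// `InferenceType::parts` (layout follows from the templates above and from
// `depth_change` below):
//
//     an array of UInt32                     -> [ Array, UInt32 ]
//     a two-element tuple of Bool and String -> [ Tuple(2), Bool, String, Character ]
//     an uninferred polymorphic variable     -> [ Marker(0), Unknown ]
//
// A `Marker` only tags the subtree that follows it; it does not occupy a slot
// in the type tree itself (its depth_change is 0).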
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        match self {
 
            InferenceTypePart::Marker(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike |
 
            ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_arraylike(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::String | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_arraylike()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
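    // For example: IntegerLike may be inferred from UInt8 (or any other
    // concrete integer), NumberLike may be inferred from IntegerLike, and
    // PortLike from Input/Output; a concrete part such as UInt8 is never
    // "inferred from" another concrete part this way.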
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 |
 
            ITP::Character => {
 
                -1
 
            },
 
            ITP::Marker(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output | ITP::String => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Tuple(num) | ITP::Instance(_, num) => {
 
                (*num as i32) - 1
 
            }
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
struct InferenceType {
 
    has_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    /// Generates a new InferenceType. The two boolean flags will be checked in
 
    /// debug mode.
 
    fn new(has_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        dbg_code!({
 
            debug_assert!(!parts.is_empty());
 
            let parts_body_marker = parts.iter().any(|v| v.is_marker());
 
            debug_assert_eq!(has_marker, parts_body_marker);
 
            let parts_done = parts.iter().all(|v| v.is_concrete());
 
            debug_assert_eq!(is_done, parts_done, "{:?}", parts);
 
        });
 
        Self{ has_marker, is_done, parts }
 
    }
 

	
 
    /// Replaces a type subtree with the provided subtree. The caller must make
 
    /// sure that the replacement is a well-formed type subtree.
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
        let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
        self.parts.splice(start_idx..end_idx, with.iter().cloned());
 
        self.recompute_is_done();
 
    }
 

	
 
    // TODO: @performance, might all be done inline in the type inference methods
 
    fn recompute_is_done(&mut self) {
 
        self.is_done = self.parts.iter().all(|v| v.is_concrete());
 
    }
 

	
 
    /// Seeks a body marker starting at the specified position. If a marker is
 
    /// found then its value and the index of the type subtree that follows it
 
    /// is returned.
 
    fn find_marker(&self, mut start_idx: usize) -> Option<(u32, usize)> {
 
        while start_idx < self.parts.len() {
 
            if let InferenceTypePart::Marker(marker) = &self.parts[start_idx] {
 
                return Some((*marker, start_idx + 1))
 
            }
 

	
 
            start_idx += 1;
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Returns an iterator over all body markers and the partial type tree that
 
    /// follows those markers. If it is a problem that `InferenceType` is 
 
    /// borrowed by the iterator, then use `find_marker`.
 
    fn marker_iter(&self) -> InferenceTypeMarkerIter {
 
        InferenceTypeMarkerIter::new(&self.parts)
 
    }
 

	
 
    /// Given that the `parts` are a depth-first serialized tree of types, this
 
    /// function finds the subtree anchored at a specific node. The returned 
 
    /// index is exclusive.
 
    fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize {
 
        let mut depth = 1;
 
        let mut idx = start_idx;
 

	
 
        while idx < parts.len() {
 
            depth += parts[idx].depth_change();
 
            if depth == 0 {
 
                return idx + 1;
 
            }
 
            idx += 1;
 
        }
 

	
 
        // If here, then the inference type is malformed
 
        unreachable!("Malformed type: {:?}", parts);
 
    }
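    // Illustrative trace for parts = [ Tuple(2), Array, UInt8, Bool ] and
    // start_idx = 0: depth starts at 1, Tuple(2) adds +1 (depth 2), Array adds
    // 0 (depth 2), UInt8 adds -1 (depth 1), Bool adds -1 (depth 0), so the
    // function returns 4: the whole slice is the subtree rooted at index 0.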
 

	
 
    /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` 
 
    /// using the subtree at `template.parts[template_idx]`. Will return 
 
    /// `Some(depth_change_due_to_traversal)` if type inference has been 
 
    /// applied. In this case the indices will also be modified to point to the 
 
    /// next part in both templates. If type inference was not (or could not
 
    /// be) applied then `None` will be returned. Note that this might mean that
 
    /// the types are incompatible.
 
    ///
 
    /// As this is a helper function, some assumptions apply: the parts are not
 
    /// exactly equal, and neither of them contains a marker. Also: only the
 
    /// `to_infer` parts are checked for inference. It might be that this 
 
    /// function returns `None`, but that `template` is still compatible
 
    /// with `to_infer`, e.g. when `template` has an `Unknown` part.
 
    fn infer_part_for_single_type(
 
        to_infer: &mut InferenceType, to_infer_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize,
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_infer_part = &to_infer.parts[*to_infer_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Check for programmer mistakes
 
        debug_assert_ne!(to_infer_part, template_part);
 
        debug_assert!(!to_infer_part.is_marker(), "marker encountered in 'infer part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        // Inference of a somewhat-specified type
 
        if to_infer_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_infer_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 

	
 
            to_infer.parts[*to_infer_idx] = template_part.clone();
 

	
 
            *to_infer_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        // Inference of a completely unknown type
 
        if *to_infer_part == ITP::Unknown {
 
            // template part is different, so cannot be unknown, hence copy the
 
            // entire subtree. Make sure not to copy markers.
 
            let template_end_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 
            to_infer.parts[*to_infer_idx] = template_parts[*template_idx].clone(); // first element
 

	
 
            *to_infer_idx += 1;
 
            for template_idx in *template_idx + 1..template_end_idx {
 
                let template_part = &template_parts[template_idx];
 
                if !template_part.is_marker() {
 
                    to_infer.parts.insert(*to_infer_idx, template_part.clone());
 
                    *to_infer_idx += 1;
 
                }
 
            }
 
            *template_idx = template_end_idx;
 

	
 
            // Note: by definition the LHS was Unknown and the RHS traversed a 
 
            // full subtree.
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
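    // Illustrative example: with to_infer.parts = [ Array, Unknown ] and
    // template_parts = [ Array, UInt8 ], the equal leading `Array` parts are
    // skipped by the caller; when this function is then called at index 1 the
    // `Unknown` is replaced by the `UInt8` subtree, yielding [ Array, UInt8 ]
    // and a returned depth change of -1.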
 

	
 
    /// Call that checks if the `to_check` part is compatible with the `infer`
 
    /// part. This is essentially a copy of `infer_part_for_single_type`, but
 
    /// without actually copying the type parts.
 
    fn check_part_for_single_type(
 
        to_check_parts: &[InferenceTypePart], to_check_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_check_part = &to_check_parts[*to_check_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Checking programmer errors
 
        debug_assert_ne!(to_check_part, template_part);
 
        debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        if to_check_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_check_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 
            *to_check_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        if *to_check_part == ITP::Unknown {
 
            *to_check_idx += 1;
 
            *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 

	
 
            // By definition LHS and RHS had depth change of -1
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Attempts to infer types between two `InferenceType` instances. This 
 
    /// function is unsafe as it accepts pointers to work around Rust's 
 
    /// borrowing rules. The caller must ensure that the pointers are distinct.
 
    unsafe fn infer_subtrees_for_both_types(
 
        type_a: *mut InferenceType, start_idx_a: usize,
 
        type_b: *mut InferenceType, start_idx_b: usize
 
    ) -> DualInferenceResult {
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "encountered pointers to the same inference type");
 
        let type_a = &mut *type_a;
 
        let type_b = &mut *type_b;
 

	
 
        let mut modified_a = false;
 
        let mut modified_b = false;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            // Advance indices if we encounter markers or equal parts
 
            let part_a = &type_a.parts[idx_a];
 
            let part_b = &type_b.parts[idx_b];
 
            
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            // Types are not equal and are both not markers
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_a, &mut idx_a, &type_b.parts, &mut idx_b) {
 
                depth += depth_change;
 
                modified_a = true;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_b, &mut idx_b, &type_a.parts, &mut idx_a) {
 
                depth += depth_change;
 
                modified_b = true;
 
                continue;
 
            }
 

	
 
            // Types can not be inferred in any way: types must be incompatible
 
            return DualInferenceResult::Incompatible;
 
        }
 

	
 
        if modified_a { type_a.recompute_is_done(); }
 
        if modified_b { type_b.recompute_is_done(); }
 

	
 
        // If here then we completely inferred the subtrees.
 
        match (modified_a, modified_b) {
 
            (false, false) => DualInferenceResult::Neither,
 
            (false, true) => DualInferenceResult::Second,
 
            (true, false) => DualInferenceResult::First,
 
            (true, true) => DualInferenceResult::Both
 
        }
 
    }
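    // Minimal usage sketch (assuming two distinct inference types `a` and `b`
    // owned by the caller); the raw pointers merely work around the borrow
    // checker, as documented above:
    //
    //     let a_ptr: *mut InferenceType = &mut a;
    //     let b_ptr: *mut InferenceType = &mut b;
    //     let result = unsafe {
    //         InferenceType::infer_subtrees_for_both_types(a_ptr, 0, b_ptr, 0)
    //     };
    //     if result == DualInferenceResult::Incompatible { /* report type error */ }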
 

	
 
    /// Attempts to infer the first subtree based on the template. Like
 
    /// `infer_subtrees_for_both_types`, but now only applying inference to
 
    /// `to_infer` based on the type information in `template`.
 
    ///
 
    /// The `forced_template` flag controls whether `to_infer` is considered
 
    /// valid if it is more specific than the template. When `forced_template`
 
    /// is false, then as long as the `to_infer` and `template` types are
 
    /// compatible the inference will succeed. If `forced_template` is true,
 
    /// then `to_infer` MUST be less specific than `template` (e.g.
 
    /// `IntegerLike` is less specific than `UInt32`)
 
    fn infer_subtree_for_single_type(
 
        to_infer: &mut InferenceType, mut to_infer_idx: usize,
 
        template: &[InferenceTypePart], mut template_idx: usize,
 
        forced_template: bool,
 
    ) -> SingleInferenceResult {
 
        let mut modified = false;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            let to_infer_part = &to_infer.parts[to_infer_idx];
 
            let template_part = &template[template_idx];
 

	
 
            if to_infer_part == template_part {
 
                let depth_change = to_infer_part.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, template_part.depth_change());
 
                to_infer_idx += 1;
 
                template_idx += 1;
 
                continue;
 
            }
 
            if to_infer_part.is_marker() { to_infer_idx += 1; continue; }
 
            if template_part.is_marker() { template_idx += 1; continue; }
 

	
 
            // Types are not equal and not markers. So check if we can infer 
 
            // anything
 
            if let Some(depth_change) = Self::infer_part_for_single_type(
 
                to_infer, &mut to_infer_idx, template, &mut template_idx
 
            ) {
 
                depth += depth_change;
 
                modified = true;
 
                continue;
 
            }
 

	
 
            if !forced_template {
 
                // We cannot infer anything, but the template may still be
 
                // compatible with the type we're inferring
 
                if let Some(depth_change) = Self::check_part_for_single_type(
 
                    template, &mut template_idx, &to_infer.parts, &mut to_infer_idx
 
                ) {
 
                    depth += depth_change;
 
                    continue;
 
                }
 
            }
 

	
 
            return SingleInferenceResult::Incompatible
 
        }
 

	
 
        if modified {
 
            to_infer.recompute_is_done();
 
            return SingleInferenceResult::Modified;
 
        } else {
 
            return SingleInferenceResult::Unmodified;
 
        }
 
    }
 

	
 
    /// Checks if both types are compatible, doesn't perform any inference
 
    fn check_subtrees(
 
        type_parts_a: &[InferenceTypePart], start_idx_a: usize,
 
        type_parts_b: &[InferenceTypePart], start_idx_b: usize
 
    ) -> bool {
 
        let mut depth = 1;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 

	
 
        while depth > 0 {
 
            let part_a = &type_parts_a[idx_a];
 
            let part_b = &type_parts_b[idx_b];
 

	
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_a, &mut idx_a, type_parts_b, &mut idx_b
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_b, &mut idx_b, type_parts_a, &mut idx_a
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return false;
 
        }
 

	
 
        true
 
    }
 

	
 
    /// Performs the conversion of the inference type into a concrete type.
 
    /// By calling this function you must make sure that no unspecified types
 
    /// (e.g. Unknown or IntegerLike) exist in the type. Will not clear or check
 
    /// if the supplied `ConcreteType` is empty, will simply append to the parts
 
    /// vector.
 
    fn write_concrete_type(&self, concrete_type: &mut ConcreteType) {
 
        use InferenceTypePart as ITP;
 
        use ConcreteTypePart as CTP;
 

	
 
        // Make sure inference type is specified but concrete type is not yet specified
 
        debug_assert!(!self.parts.is_empty());
 
        concrete_type.parts.reserve(self.parts.len());
 

	
 
        let mut idx = 0;
 
        while idx < self.parts.len() {
 
            let part = &self.parts[idx];
 
            let converted_part = match part {
 
                ITP::Marker(_) => {
 
                    // Markers are removed when writing to the concrete type.
 
                    idx += 1;
 
                    continue;
 
                },
 
                ITP::Unknown | ITP::NumberLike |
 
                ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => {
 
                    // Should not happen if type inferencing works correctly: we
 
                    // should have returned a programmer-readable error or have
 
                    // inferred all types.
 
                    unreachable!("attempted to convert inference type part {:?} into concrete type", part);
 
                },
 
                ITP::Void => CTP::Void,
 
                ITP::Message => CTP::Message,
 
                ITP::Bool => CTP::Bool,
 
                ITP::UInt8 => CTP::UInt8,
 
                ITP::UInt16 => CTP::UInt16,
 
                ITP::UInt32 => CTP::UInt32,
 
                ITP::UInt64 => CTP::UInt64,
 
                ITP::SInt8 => CTP::SInt8,
 
                ITP::SInt16 => CTP::SInt16,
 
                ITP::SInt32 => CTP::SInt32,
 
                ITP::SInt64 => CTP::SInt64,
 
                ITP::Character => CTP::Character,
 
                ITP::String => {
 
                    // Inferred type has a 'char' subtype to simplify array
 
                    // checking, we remove it here.
 
                    debug_assert_eq!(self.parts[idx + 1], InferenceTypePart::Character);
 
                    idx += 1;
 
                    CTP::String
 
                },
 
                ITP::Array => CTP::Array,
 
                ITP::Slice => CTP::Slice,
 
                ITP::Input => CTP::Input,
 
                ITP::Output => CTP::Output,
 
                ITP::Tuple(num) => CTP::Tuple(*num),
 
                ITP::Instance(id, num) => CTP::Instance(*id, *num),
 
            };
 

	
 
            concrete_type.parts.push(converted_part);
 
            idx += 1;
 
        }
 
    }
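    // Illustrative example: an inference type with parts
    // [ Marker(0), String, Character ] is written out as the concrete parts
    // [ String ]: the marker is dropped entirely, and the helper 'char'
    // subtype of the string is skipped as well.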
 

	
 
    /// Writes a human-readable version of the type to a string. This is used
 
    /// to display error messages
 
    fn write_display_name(
 
        buffer: &mut String, heap: &Heap, parts: &[InferenceTypePart], mut idx: usize
 
    ) -> usize {
 
        use InferenceTypePart as ITP;
 

	
 
        match &parts[idx] {
 
            ITP::Marker(_marker_idx) => {
 
                if debug_log_enabled!() {
 
                    buffer.push_str(&format!("{{Marker:{}}}", *_marker_idx));
 
                }
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
            },
 
            ITP::Unknown => buffer.push_str("?"),
 
            ITP::NumberLike => buffer.push_str("numberlike"),
 
            ITP::IntegerLike => buffer.push_str("integerlike"),
 
            ITP::ArrayLike => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[?]");
 
            },
 
            ITP::PortLike => {
 
                buffer.push_str("portlike<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            }
 
            ITP::Void => buffer.push_str("void"),
 
            ITP::Bool => buffer.push_str(KW_TYPE_BOOL_STR),
 
            ITP::UInt8 => buffer.push_str(KW_TYPE_UINT8_STR),
 
            ITP::UInt16 => buffer.push_str(KW_TYPE_UINT16_STR),
 
            ITP::UInt32 => buffer.push_str(KW_TYPE_UINT32_STR),
 
            ITP::UInt64 => buffer.push_str(KW_TYPE_UINT64_STR),
 
            ITP::SInt8 => buffer.push_str(KW_TYPE_SINT8_STR),
 
            ITP::SInt16 => buffer.push_str(KW_TYPE_SINT16_STR),
 
            ITP::SInt32 => buffer.push_str(KW_TYPE_SINT32_STR),
 
            ITP::SInt64 => buffer.push_str(KW_TYPE_SINT64_STR),
 
            ITP::Character => buffer.push_str(KW_TYPE_CHAR_STR),
 
            ITP::String => {
 
                buffer.push_str(KW_TYPE_STRING_STR);
 
                idx += 1; // skip the 'char' subtype
 
            },
 
            ITP::Message => {
 
                buffer.push_str(KW_TYPE_MESSAGE_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Array => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            ITP::Slice => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            ITP::Input => {
 
                buffer.push_str(KW_TYPE_IN_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Output => {
 
                buffer.push_str(KW_TYPE_OUT_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Tuple(num_sub) => {
 
                buffer.push('(');
 
                if *num_sub > 0 {
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                }
 
                buffer.push(')');
 
            }
 
            ITP::Instance(definition_id, num_sub) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(definition.identifier().value.as_str());
 
                if *num_sub > 0 {
 
                    buffer.push('<');
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            },
 
        }
 

	
 
        idx
 
    }
 

	
 
    /// Returns the display name of a (part of) the type tree. Will allocate a
 
    /// string.
 
    fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String {
 
        let mut buffer = String::with_capacity(parts.len() * 6);
 
        Self::write_display_name(&mut buffer, heap, parts, 0);
 
        buffer
 
    }
 

	
 
    /// Returns the display name of the full type tree. Will allocate a string.
 
    fn display_name(&self, heap: &Heap) -> String {
 
        Self::partial_display_name(heap, &self.parts)
 
    }
 
}
 

	
 
impl Default for InferenceType {
 
    fn default() -> Self {
 
        Self{
 
            has_marker: false,
 
            is_done: false,
 
            parts: Vec::new(),
 
        }
 
    }
 
}
 

	
 
/// Iterator over the subtrees that follow a marker in an `InferenceType`
 
/// instance. Returns immutable slices over the internal parts
 
struct InferenceTypeMarkerIter<'a> {
 
    parts: &'a [InferenceTypePart],
 
    idx: usize,
 
}
 

	
 
impl<'a> InferenceTypeMarkerIter<'a> {
 
    fn new(parts: &'a [InferenceTypePart]) -> Self {
 
        Self{ parts, idx: 0 }
 
    }
 
}
 

	
 
impl<'a> Iterator for InferenceTypeMarkerIter<'a> {
 
    type Item = (u32, &'a [InferenceTypePart]);
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Iterate until we find a marker
 
        while self.idx < self.parts.len() {
 
            if let InferenceTypePart::Marker(marker) = self.parts[self.idx] {
 
                // Found a marker, find the subtree end
 
                let start_idx = self.idx + 1;
 
                let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx);
 

	
 
                // Modify internal index, then return items
 
                self.idx = end_idx;
 
                return Some((marker, &self.parts[start_idx..end_idx]));
 
            }
 

	
 
            self.idx += 1;
 
        }
 

	
 
        None
 
    }
 
}
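// Illustrative example: iterating over parts
// [ Marker(0), UInt8, Marker(1), Array, Bool ] yields (0, [UInt8]) followed by
// (1, [Array, Bool]); each returned slice is the complete subtree that follows
// its marker.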
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum DualInferenceResult {
 
    Neither,        // neither argument is clarified
 
    First,          // first argument is clarified using the second one
 
    Second,         // second argument is clarified using the first one
 
    Both,           // both arguments are clarified
 
    Incompatible,   // types are incompatible: programmer error
 
}
 

	
 
impl DualInferenceResult {
 
    fn modified_lhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_rhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum SingleInferenceResult {
 
    Unmodified,
 
    Modified,
 
    Incompatible
 
}
 

	
 
enum DefinitionType{
 
    Component(ComponentDefinitionId),
 
    Function(FunctionDefinitionId),
 
}
 

	
 
impl DefinitionType {
 
    fn definition_id(&self) -> DefinitionId {
 
        match self {
 
            DefinitionType::Component(v) => v.upcast(),
 
            DefinitionType::Function(v) => v.upcast(),
 
        }
 
    }
 
}
 

	
 
pub(crate) struct ResolveQueueElement {
 
    // Note that using the `definition_id` and the `monomorph_idx` one may
 
    // query the type table for the full procedure type, thereby retrieving
 
    // the polymorphic arguments to the procedure.
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) reserved_monomorph_idx: i32,
 
    pub(crate) reserved_type_id: TypeId,
 
}
 

	
 
pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
 

	
 
#[derive(Clone)]
 
struct InferenceExpression {
 
    expr_type: InferenceType,       // result type from expression
 
    expr_id: ExpressionId,          // expression that is evaluated
 
    field_or_monomorph_idx: i32,    // index of field, or index of monomorph array in type table
 
    extra_data_idx: i32,            // index of extra data needed for inference
 
    type_id: TypeId,                // when applicable indexes into type table
 
}
 

	
 
impl Default for InferenceExpression {
 
    fn default() -> Self {
 
        Self{
 
            expr_type: InferenceType::default(),
 
            expr_id: ExpressionId::new_invalid(),
 
            field_or_monomorph_idx: -1,
 
            extra_data_idx: -1,
 
            type_id: TypeId::new_invalid(),
 
        }
 
    }
 
}
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
 
pub(crate) struct PassTyping {
 
    // Current definition we're typechecking.
 
    reserved_idx: i32,
 
    reserved_type_id: TypeId,
 
    definition_type: DefinitionType,
 
    poly_vars: Vec<ConcreteType>,
 
    // Buffers for iteration over various types
 
    var_buffer: ScopedBuffer<VariableId>,
 
    expr_buffer: ScopedBuffer<ExpressionId>,
 
    stmt_buffer: ScopedBuffer<StatementId>,
 
    bool_buffer: ScopedBuffer<bool>,
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    var_types: HashMap<VariableId, VarData>,            // types of variables
 
    expr_types: Vec<InferenceExpression>,                     // will be transferred to type table at end
 
    extra_data: Vec<ExtraData>,       // data for polymorph inference
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progress on an associated type
 
    expr_queued: DequeSet<i32>,
 
}
 

	
 
// TODO: @Rename, this is used for a lot of type inferencing. It seems like
 
//  there is a different underlying architecture waiting to surface.
 
struct ExtraData {
 
    expr_id: ExpressionId, // the expression with which this data is associated
 
    definition_id: DefinitionId, // the definition, only used for user feedback
 
    /// Progression of polymorphic variables (if any)
 
    poly_vars: Vec<InferenceType>,
 
    /// Progression of types of call arguments or struct members
 
    embedded: Vec<InferenceType>,
 
    returned: InferenceType,
 
}
 

	
 
impl Default for ExtraData {
 
    fn default() -> Self {
 
        Self{
 
            expr_id: ExpressionId::new_invalid(),
 
            definition_id: DefinitionId::new_invalid(),
 
            poly_vars: Vec::new(),
 
            embedded: Vec::new(),
 
            returned: InferenceType::default(),
 
        }
 
    }
 
}
 

	
 
struct VarData {
 
    /// Type of the variable
 
    var_type: InferenceType,
 
    /// VariableExpressions that use the variable
 
    used_at: Vec<ExpressionId>,
 
    /// For channel statements we link to the other variable such that when one
 
    /// channel's interior type is resolved, we can also resolve the other one.
 
    linked_var: Option<VariableId>,
 
}
 

	
 
impl VarData {
 
    fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
 
    }
 
    fn new_local(var_type: InferenceType) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: None }
 
    }
 
}
 

	
 
impl PassTyping {
 
    pub(crate) fn new() -> Self {
 
        PassTyping {
 
            reserved_idx: -1,
 
            reserved_type_id: TypeId::new_invalid(),
 
            definition_type: DefinitionType::Function(FunctionDefinitionId::new_invalid()),
 
            poly_vars: Vec::new(),
 
            var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            bool_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            var_types: HashMap::new(),
 
            expr_types: Vec::new(),
 
            extra_data: Vec::new(),
 
            expr_queued: DequeSet::new(),
 
        }
 
    }
 

	
 
    pub(crate) fn queue_module_definitions(ctx: &mut Ctx, queue: &mut ResolveQueue) {
 
        debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::ValidatedAndLinked);
 
        let root_id = ctx.module().root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 

	
 
            let first_concrete_part = match definition {
 
                Definition::Function(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        Some(ConcreteTypePart::Function(*definition_id, 0))
 
                    } else {
 
                        None
 
                    }
 
                }
 
                Definition::Component(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        Some(ConcreteTypePart::Component(*definition_id, 0))
 
                    } else {
 
                        None
 
                    }
 
                },
 
                Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => None,
 
            };
 

	
 
            if let Some(first_concrete_part) = first_concrete_part {
 
                let concrete_type = ConcreteType{ parts: vec![first_concrete_part] };
 
                let reserved_idx = ctx.types.reserve_procedure_monomorph_index(definition_id, concrete_type);
 
                let type_id = ctx.types.reserve_procedure_monomorph_type_id(definition_id, concrete_type);
 
                queue.push(ResolveQueueElement{
 
                    root_id,
 
                    definition_id: *definition_id,
 
                    reserved_monomorph_idx: reserved_idx,
 
                    reserved_type_id: type_id,
 
                })
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        self.reset();
 
        debug_assert_eq!(ctx.module().root_id, element.root_id);
 
        debug_assert!(self.poly_vars.is_empty());
 

	
 
        // Prepare for visiting the definition
 
        self.reserved_idx = element.reserved_monomorph_idx;
 
        self.reserved_type_id = element.reserved_type_id;
 

	
 
        let proc_base = ctx.types.get_base_definition(&element.definition_id).unwrap();
 
        if proc_base.is_polymorph {
 
            let monomorph = ctx.types.get_monomorph(element.reserved_type_id);
 
            for poly_arg in monomorph.concrete_type.embedded_iter(0) {
 
                self.poly_vars.push(ConcreteType{ parts: Vec::from(poly_arg) });
 
            }
 
        }
 

	
 
        // Visit the definition, setting up the type resolving process, then
 
        // (attempt to) resolve all types
 
        self.visit_definition(ctx, element.definition_id)?;
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.reserved_idx = -1;
 
        self.reserved_type_id = TypeId::new_invalid();
 
        self.definition_type = DefinitionType::Function(FunctionDefinitionId::new_invalid());
 
        self.poly_vars.clear();
 
        self.var_types.clear();
 
        self.expr_types.clear();
 
        self.extra_data.clear();
 
        self.expr_queued.clear();
 
    }
 
}
 

	
 
impl Visitor for PassTyping {
 
    // Definitions
 

	
 
    fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Component(id);
 

	
 
        let comp_def = &ctx.heap[id];
 
        debug_assert_eq!(comp_def.poly_vars.len(), self.poly_vars.len(), "component polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting component '{}': {}", comp_def.identifier.value.as_str(), id.0.index);
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        // Reserve data for expression types
 
        debug_assert!(self.expr_types.is_empty());
 
        self.expr_types.resize(comp_def.num_expressions_in_body as usize, Default::default());
 

	
 
        // Visit parameters
 
        let section = self.var_buffer.start_section_initialized(comp_def.parameters.as_slice());
 
        for param_id in section.iter_copied() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected component arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 
        section.forget();
 

	
 
        // Visit the body and all of its expressions
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Function(id);
 

	
 
        let func_def = &ctx.heap[id];
 
        debug_assert_eq!(func_def.poly_vars.len(), self.poly_vars.len(), "function polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting function '{}': {}", func_def.identifier.value.as_str(), id.0.index);
 
        if debug_log_enabled!() {
 
            debug_log!("Polymorphic variables:");
 
            for (_idx, poly_var) in self.poly_vars.iter().enumerate() {
 
                let mut infer_type_parts = Vec::new();
 
                Self::determine_inference_type_from_concrete_type(
 
                    &mut infer_type_parts, &poly_var.parts
 
                );
 
                let _infer_type = InferenceType::new(false, true, infer_type_parts);
 
                debug_log!(" - [{:03}] {:?}", _idx, _infer_type.display_name(&ctx.heap));
 
            }
 
        }
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        // Reserve data for expression types
 
        debug_assert!(self.expr_types.is_empty());
 
        self.expr_types.resize(func_def.num_expressions_in_body as usize, Default::default());
 

	
 
        // Visit parameters
 
        let section = self.var_buffer.start_section_initialized(func_def.parameters.as_slice());
 
        for param_id in section.iter_copied() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 
        section.forget();
 

	
 
        // Visit all of the expressions within the body
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Transfer statements for traversal
 
        let block = &ctx.heap[id];
 

	
 
        let section = self.stmt_buffer.start_section_initialized(block.statements.as_slice());
 
        for stmt_id in section.iter_copied() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 
        section.forget();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 
        let initial_expr_id = memory_stmt.initial_expr;
 

	
 
        // Setup memory statement inference
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type_elements(&local.parser_type.elements, true);
 
        self.var_types.insert(memory_stmt.variable, VarData::new_local(var_type));
 

	
 
        // Process the initial value
 
        self.visit_assignment_expr(ctx, initial_expr_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type_elements(&from_local.parser_type.elements, true);
 
        self.var_types.insert(from_local.this, VarData::new_channel(from_var_type, channel_stmt.to));
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type_elements(&to_local.parser_type.elements, true);
 
        self.var_types.insert(to_local.this, VarData::new_channel(to_var_type, channel_stmt.from));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_case = if_stmt.true_case;
 
        let false_body_case = if_stmt.false_case;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, true_body_case.body)?;
 
        if let Some(false_body_case) = false_body_case {
 
            self.visit_stmt(ctx, false_body_case.body)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
        let while_stmt = &ctx.heap[id];
 

	
 
        let body_id = while_stmt.body;
 
        let test_expr_id = while_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        let sync_stmt = &ctx.heap[id];
 
        let body_id = sync_stmt.body;
 

	
 
        self.visit_stmt(ctx, body_id)
 
    }
 

	
 
    fn visit_fork_stmt(&mut self, ctx: &mut Ctx, id: ForkStatementId) -> VisitorResult {
 
        let fork_stmt = &ctx.heap[id];
 
        let left_body_id = fork_stmt.left_body;
 
        let right_body_id = fork_stmt.right_body;
 

	
 
        self.visit_stmt(ctx, left_body_id)?;
 
        if let Some(right_body_id) = right_body_id {
 
            self.visit_stmt(ctx, right_body_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_select_stmt(&mut self, ctx: &mut Ctx, id: SelectStatementId) -> VisitorResult {
 
        let select_stmt = &ctx.heap[id];
 

	
 
        let mut section = self.stmt_buffer.start_section();
 
        let num_cases = select_stmt.cases.len();
 

	
 
        for case in &select_stmt.cases {
 
            section.push(case.guard);
 
            section.push(case.body);
 
        }
 

	
 
        for case_index in 0..num_cases {
 
            let base_index = 2 * case_index;
 
            let guard_stmt_id = section[base_index    ];
 
            let block_stmt_id = section[base_index + 1];
 

	
 
            self.visit_stmt(ctx, guard_stmt_id)?;
 
            self.visit_stmt(ctx, block_stmt_id)?;
 
        }
 
        section.forget();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        let return_stmt = &ctx.heap[id];
 
        debug_assert_eq!(return_stmt.expressions.len(), 1);
 
        let expr_id = return_stmt.expressions[0];
 

	
 
        self.visit_expr(ctx, expr_id)
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        let new_stmt = &ctx.heap[id];
 
        let call_expr_id = new_stmt.expression;
 

	
 
        self.visit_call_expr(ctx, call_expr_id)
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_stmt = &ctx.heap[id];
 
        let subexpr_id = expr_stmt.expression;
 

	
 
        self.visit_expr(ctx, subexpr_id)
 
    }
 

	
 
    // Expressions
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let assign_expr = &ctx.heap[id];
 
        let left_expr_id = assign_expr.left;
 
        let right_expr_id = assign_expr.right;
 

	
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.visit_expr(ctx, right_expr_id)?;
 

	
 
        self.progress_assignment_expr(ctx, id)
 
    }
 

	
 
    fn visit_binding_expr(&mut self, ctx: &mut Ctx, id: BindingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let binding_expr = &ctx.heap[id];
 
        let bound_to_id = binding_expr.bound_to;
 
        let bound_from_id = binding_expr.bound_from;
 

	
 
        self.visit_expr(ctx, bound_to_id)?;
 
        self.visit_expr(ctx, bound_from_id)?;
 

	
 
        self.progress_binding_expr(ctx, id)
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let conditional_expr = &ctx.heap[id];
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_expr(ctx, true_expr_id)?;
 
        self.visit_expr(ctx, false_expr_id)?;
 

	
 
        self.progress_conditional_expr(ctx, id)
 
    }
 

	
 
    fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let binary_expr = &ctx.heap[id];
 
        let lhs_expr_id = binary_expr.left;
 
        let rhs_expr_id = binary_expr.right;
 

	
 
        self.visit_expr(ctx, lhs_expr_id)?;
 
        self.visit_expr(ctx, rhs_expr_id)?;
 

	
 
        self.progress_binary_expr(ctx, id)
 
    }
 

	
 
    fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let unary_expr = &ctx.heap[id];
 
        let arg_expr_id = unary_expr.expression;
 

	
 
        self.visit_expr(ctx, arg_expr_id)?;
 

	
 
        self.progress_unary_expr(ctx, id)
 
    }
 

	
 
    fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let indexing_expr = &ctx.heap[id];
 
        let subject_expr_id = indexing_expr.subject;
 
        let index_expr_id = indexing_expr.index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, index_expr_id)?;
 

	
 
        self.progress_indexing_expr(ctx, id)
 
    }
 

	
 
    fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let slicing_expr = &ctx.heap[id];
 
        let subject_expr_id = slicing_expr.subject;
 
        let from_expr_id = slicing_expr.from_index;
 
        let to_expr_id = slicing_expr.to_index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, from_expr_id)?;
 
        self.visit_expr(ctx, to_expr_id)?;
 

	
 
        self.progress_slicing_expr(ctx, id)
 
    }
 

	
 
    fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let select_expr = &ctx.heap[id];
 
        let subject_expr_id = select_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_select_expr(ctx, id)
 
    }
 

	
 
    fn visit_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let literal_expr = &ctx.heap[id];
 
        match &literal_expr.value {
 
            Literal::Null | Literal::False | Literal::True |
 
            Literal::Integer(_) | Literal::Character(_) | Literal::String(_) => {
 
                // No subexpressions
 
            },
 
            Literal::Struct(literal) => {
 
                let mut expr_ids = self.expr_buffer.start_section();
 
                for field in &literal.fields {
 
                    expr_ids.push(field.value);
 
                }
 
                self.insert_initial_struct_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids.iter_copied() {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
                expr_ids.forget();
 
            },
 
            Literal::Enum(_) => {
 
                // Enumerations do not carry any subexpressions, but they may
                // still have user-defined polymorphic variables. For this
                // reason we may still have to apply inference to those
                // polymorphic variables.
 
                self.insert_initial_enum_polymorph_data(ctx, id);
 
            },
 
            Literal::Union(literal) => {
 
                // May carry subexpressions and polymorphic arguments
 
                let expr_ids = self.expr_buffer.start_section_initialized(literal.values.as_slice());
 
                self.insert_initial_union_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids.iter_copied() {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
                expr_ids.forget();
 
            },
 
            Literal::Array(expressions) | Literal::Tuple(expressions) => {
 
                let expr_ids = self.expr_buffer.start_section_initialized(expressions.as_slice());
 
                for expr_id in expr_ids.iter_copied() {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
                expr_ids.forget();
 
            }
 
        }
 

	
 
        self.progress_literal_expr(ctx, id)
 
    }
 

	
 
    fn visit_cast_expr(&mut self, ctx: &mut Ctx, id: CastExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let cast_expr = &ctx.heap[id];
 
        let subject_expr_id = cast_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_cast_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // By default we set the polymorph idx for calls to 0. If the call ends
 
        // up not being a polymorphic one, then we will select the default
 
        // expression types in the type table
 
        let call_expr = &ctx.heap[id];
 
        self.expr_types[call_expr.unique_id_in_definition as usize].field_or_monomorph_idx = 0;
 

	
 
        // Visit all arguments
 
        let expr_ids = self.expr_buffer.start_section_initialized(call_expr.arguments.as_slice());
 
        for arg_expr_id in expr_ids.iter_copied() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 
        expr_ids.forget();
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 

	
 
        // Not pretty: if this variable belongs to a binding expression, then
        // this is the first time we encounter it, so we still need to insert
        // its variable data.
 
        let declaration = &ctx.heap[var_expr.declaration.unwrap()];
 
        if !self.var_types.contains_key(&declaration.this)  {
 
            debug_assert!(declaration.kind == VariableKind::Binding);
 
            let var_type = self.determine_inference_type_from_parser_type_elements(
 
                &declaration.parser_type.elements, true
 
            );
 
            self.var_types.insert(declaration.this, VarData{
 
                var_type,
 
                used_at: vec![upcast_id],
 
                linked_var: None
 
            });
 
        } else {
 
            let var_data = self.var_types.get_mut(&declaration.this).unwrap();
 
            var_data.used_at.push(upcast_id);
 
        }
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
impl PassTyping {
 
    #[allow(dead_code)] // used when debug flag at the top of this file is true.
 
    fn debug_get_display_name(&self, ctx: &Ctx, expr_id: ExpressionId) -> String {
 
        let expr_idx = ctx.heap[expr_id].get_unique_id_in_definition();
 
        let expr_type = &self.expr_types[expr_idx as usize].expr_type;
 
        expr_type.display_name(&ctx.heap)
 
    }
 

	
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> {
 
        // Keep inferring until we can no longer make any progress
 
        while !self.expr_queued.is_empty() {
 
            // Make as much progress as possible without forced integer
 
            // inference.
 
            while !self.expr_queued.is_empty() {
 
                let next_expr_idx = self.expr_queued.pop_front().unwrap();
 
                self.progress_expr(ctx, next_expr_idx)?;
 
            }
 

	
 
            // Nothing is queued anymore. However we might have integer literals
 
            // whose type cannot be inferred. For convenience's sake we'll
 
            // infer these to be s32.
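            // (This only triggers for a literal whose inference type is still
            // just "integerlike" at this point, i.e. nothing in the
            // surrounding expressions pinned it to a specific integer width.)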
 
            for (infer_expr_idx, infer_expr) in self.expr_types.iter_mut().enumerate() {
 
                let expr_type = &mut infer_expr.expr_type;
 
                if !expr_type.is_done && expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    // Force integer type to s32
 
                    expr_type.parts[0] = InferenceTypePart::SInt32;
 
                    expr_type.is_done = true;
 

	
 
                    // Requeue expression (and its parent, if it exists)
 
                    self.expr_queued.push_back(infer_expr_idx as i32);
 

	
 
                    if let Some(parent_expr) = ctx.heap[infer_expr.expr_id].parent_expr_id() {
 
                        let parent_idx = ctx.heap[parent_expr].get_unique_id_in_definition();
 
                        self.expr_queued.push_back(parent_idx);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Helper for transferring polymorphic variables to concrete types and
 
        // checking if they're completely specified
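        // For example: a call whose single polymorphic variable was inferred
        // as a 32-bit signed integer would roughly produce the parts
        // [Function(definition, 1), SInt32] (variant names illustrative).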
 
        fn inference_type_to_concrete_type(
 
            ctx: &Ctx, expr_id: ExpressionId, inference: &Vec<InferenceType>,
 
            first_concrete_part: ConcreteTypePart,
 
        ) -> Result<ConcreteType, ParseError> {
 
            // Prepare storage vector
 
            let mut num_inference_parts = 0;
 
            for inference_type in inference {
 
                num_inference_parts += inference_type.parts.len();
 
            }
 

	
 
            let mut concrete_type = ConcreteType{
 
                parts: Vec::with_capacity(1 + num_inference_parts),
 
            };
 
            concrete_type.parts.push(first_concrete_part);
 

	
 
            // Go through all polymorphic arguments and add them to the concrete
 
            // types.
 
            for (poly_idx, poly_type) in inference.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    let expr = &ctx.heap[expr_id];
 
                    let definition = match expr {
 
                        Expression::Call(expr) => expr.definition,
 
                        Expression::Literal(expr) => match &expr.value {
 
                            Literal::Enum(lit) => lit.definition,
 
                            Literal::Union(lit) => lit.definition,
 
                            Literal::Struct(lit) => lit.definition,
 
                            _ => unreachable!()
 
                        },
 
                        _ => unreachable!(),
 
                    };
 
                    let poly_vars = ctx.heap[definition].poly_vars();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.operation_span(), format!(
 
                            "could not fully infer the type of polymorphic variable '{}' of this expression (got '{}')",
 
                            poly_vars[poly_idx].value.as_str(), poly_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 

	
 
                poly_type.write_concrete_type(&mut concrete_type);
 
            }
 

	
 
            Ok(concrete_type)
 
        }
 

	
 
        // Inference is now done. But we may still have uninferred types. So we
 
        // check for these.
 
        for infer_expr in self.expr_types.iter_mut() {
 
            if !infer_expr.expr_type.is_done {
 
                let expr = &ctx.heap[infer_expr.expr_id];
 
                return Err(ParseError::new_error_at_span(
 
                    &ctx.module().source, expr.full_span(), format!(
 
                        "could not fully infer the type of this expression (got '{}')",
 
                        infer_expr.expr_type.display_name(&ctx.heap)
 
                    )
 
                ));
 
            }
 

	
 
            // Expression is fine, check if any extra data is attached
 
            if infer_expr.extra_data_idx < 0 { continue; }
 

	
 
            // Extra data is attached, perform typechecking and transfer
 
            // resolved information to the expression
 
            let extra_data = &self.extra_data[infer_expr.extra_data_idx as usize];
 

	
 
            // Note that only call and literal expressions need full inference.
 
            // Select expressions also use `extra_data`, but only for temporary
 
            // storage of the struct type whose field it is selecting.
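            // For select expressions the resolved field index was already
            // written to `field_or_monomorph_idx` during inference, hence the
            // debug assertion in that arm below.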
 
            match &ctx.heap[extra_data.expr_id] {
 
                Expression::Call(expr) => {
 
                    // Check if it is not a builtin function. If not, then
 
                    // construct the first part of the concrete type.
 
                    let first_concrete_part = if expr.method == Method::UserFunction {
 
                        ConcreteTypePart::Function(expr.definition, extra_data.poly_vars.len() as u32)
 
                    } else if expr.method == Method::UserComponent {
 
                        ConcreteTypePart::Component(expr.definition, extra_data.poly_vars.len() as u32)
 
                    } else {
 
                        // Builtin function
 
                        continue;
 
                    };
 

	
 
                    let definition_id = expr.definition;
 
                    let concrete_type = inference_type_to_concrete_type(
 
                        ctx, extra_data.expr_id, &extra_data.poly_vars, first_concrete_part
 
                    )?;
 

	
 
                    match ctx.types.get_procedure_monomorph_type_id(&definition_id, &concrete_type.parts) {
                        Some(type_id) => {
                            // Already typechecked, or already put into the resolve queue
                            infer_expr.type_id = type_id;
                        },
                        None => {
                            // Not typechecked yet, so add an entry in the queue
                            let reserved_type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, concrete_type);
                            infer_expr.type_id = reserved_type_id;
                            queue.push(ResolveQueueElement {
                                root_id: ctx.heap[definition_id].defined_in(),
                                definition_id,
                                reserved_type_id,
                            });
                        }
                    }
                },
 
                Expression::Literal(expr) => {
 
                    let definition_id = match &expr.value {
 
                        Literal::Enum(lit) => lit.definition,
 
                        Literal::Union(lit) => lit.definition,
 
                        Literal::Struct(lit) => lit.definition,
 
                        _ => unreachable!(),
 
                    };
 
                    let first_concrete_part = ConcreteTypePart::Instance(definition_id, extra_data.poly_vars.len() as u32);
 
                    let concrete_type = inference_type_to_concrete_type(
 
                        ctx, extra_data.expr_id, &extra_data.poly_vars, first_concrete_part
 
                    )?;
 
 
                    let type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, definition_id, concrete_type)?;
 
                    infer_expr.type_id = type_id;
 
                },
 
                Expression::Select(_) => {
 
                    debug_assert!(infer_expr.field_or_monomorph_idx >= 0);
 
                },
 
                _ => {
 
                    unreachable!("handling extra data for expression {:?}", &ctx.heap[extra_data.expr_id]);
 
                }
 
            }
 
        }
 

	
 
        // Every expression checked, and new monomorphs are queued. Transfer the
 
        // expression information to the type table.
 
        let procedure_arguments = match &self.definition_type {
 
            DefinitionType::Component(id) => {
 
                let definition = &ctx.heap[*id];
 
                &definition.parameters
 
            },
 
            DefinitionType::Function(id) => {
 
                let definition = &ctx.heap[*id];
 
                &definition.parameters
 
            },
 
        };
 

	
 
        let target = ctx.types.get_procedure_monomorph_mut(self.reserved_type_id);
 
        debug_assert!(target.arg_types.is_empty()); // makes sure we never queue a procedure's type inferencing twice
 
        debug_assert!(target.expr_data.is_empty());
 

	
 
        // - Write the arguments to the procedure
 
        target.arg_types.reserve(procedure_arguments.len());
 
        for argument_id in procedure_arguments {
 
            let mut concrete = ConcreteType::default();
 
            let argument_type = self.var_types.get(argument_id).unwrap();
 
            argument_type.var_type.write_concrete_type(&mut concrete);
 
            target.arg_types.push(concrete);
 
        }
 

	
 
        // - Write the expression data
 
        target.expr_data.reserve(self.expr_types.len());
 
        for infer_expr in self.expr_types.iter() {
 
            let mut concrete = ConcreteType::default();
 
            infer_expr.expr_type.write_concrete_type(&mut concrete);
 
            target.expr_data.push(MonomorphExpression{
 
                expr_type: concrete,
 
                field_or_monomorph_idx: infer_expr.field_or_monomorph_idx,
 
                type_id: infer_expr.type_id,
 
            });
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, idx: i32) -> Result<(), ParseError> {
 
        let id = self.expr_types[idx as usize].expr_id;
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Binding(expr) => {
 
                let id = expr.this;
 
                self.progress_binding_expr(ctx, id)
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Literal(expr) => {
 
                let id = expr.this;
 
                self.progress_literal_expr(ctx, id)
 
            },
 
            Expression::Cast(expr) => {
 
                let id = expr.this;
 
                self.progress_cast_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError> {
 
        use AssignmentOperator as AO;
 

	
 
        let upcast_id = id.upcast();
 

	
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.debug_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type: {}", self.debug_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        // Assignment does not return anything (it operates like a statement)
 
        let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &VOID_TEMPLATE)?;
 

	
 
        // Apply forced constraint to LHS value
 
        let progress_forced = match expr.operation {
 
            AO::Set =>
 
                false,
 
            AO::Concatenated =>
 
                self.apply_template_constraint(ctx, arg1_expr_id, &ARRAYLIKE_TEMPLATE)?,
 
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
 
                self.apply_template_constraint(ctx, arg1_expr_id, &NUMBERLIKE_TEMPLATE)?,
 
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
 
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
 
                self.apply_template_constraint(ctx, arg1_expr_id, &INTEGERLIKE_TEMPLATE)?,
 
        };
 

	
 
        let (progress_arg1, progress_arg2) = self.apply_equal2_constraint(
 
            ctx, upcast_id, arg1_expr_id, 0, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_forced || progress_arg1, self.debug_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.debug_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 

	
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_forced || progress_arg1 { self.queue_expr(ctx, arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(ctx, arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binding_expr(&mut self, ctx: &mut Ctx, id: BindingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let binding_expr = &ctx.heap[id];
 
        let bound_from_id = binding_expr.bound_from;
 
        let bound_to_id = binding_expr.bound_to;
 

	
 
        // Output is always a boolean. The two arguments should be of equal
 
        // type.
 
        let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
        let (progress_from, progress_to) = self.apply_equal2_constraint(ctx, upcast_id, bound_from_id, 0, bound_to_id, 0)?;
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_from { self.queue_expr(ctx, bound_from_id); }
 
        if progress_to { self.queue_expr(ctx, bound_to_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError> {
 
        // Note: test expression type is already enforced
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.true_expression;
 
        let arg2_expr_id = expr.false_expression;
 

	
 
        debug_log!("Conditional expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.debug_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type: {}", self.debug_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        // This applies equality between the types of the two branches and the
        // type of the conditional expression itself, because the conditional
        // evaluates to one of its branches.
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.debug_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.debug_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(ctx, arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(ctx, arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> Result<(), ParseError> {
 
        // Note: our expression type might be fixed by our parent, but we still
 
        // need to make sure it matches the type associated with our operation.
 
        use BinaryOperator as BO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_id = expr.left;
 
        let arg2_id = expr.right;
 

	
 
        debug_log!("Binary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.debug_get_display_name(ctx, arg1_id));
 
        debug_log!("   - Arg2 type: {}", self.debug_get_display_name(ctx, arg2_id));
 
        debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = match expr.operation {
 
            BO::Concatenate => {
 
                // Two cases: if one of the arguments or the output type is a
 
                // string, then all must be strings. Otherwise the arguments
 
                // must be arraylike and the output will be an array.
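                // For example: concatenating two strings yields a string,
                // while concatenating two array-likes yields an array whose
                // element types must match.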
 
                let (expr_is_str, expr_is_not_str) = self.type_is_certainly_or_certainly_not_string(ctx, upcast_id);
 
                let (arg1_is_str, arg1_is_not_str) = self.type_is_certainly_or_certainly_not_string(ctx, arg1_id);
 
                let (arg2_is_str, arg2_is_not_str) = self.type_is_certainly_or_certainly_not_string(ctx, arg2_id);
 

	
 
                let someone_is_str = expr_is_str || arg1_is_str || arg2_is_str;
 
                let someone_is_not_str = expr_is_not_str || arg1_is_not_str || arg2_is_not_str;
 

	
 
                // Note: this statement is an expression returning the progression bools
 
                if someone_is_str {
 
                    // One of the arguments is a string, then all must be strings
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?
 
                } else {
 
                    let progress_expr = if someone_is_not_str {
 
                        // Output must be a normal array
 
                        self.apply_template_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?
 
                    } else {
 
                        // Output may still be anything
 
                        self.apply_template_constraint(ctx, upcast_id, &ARRAYLIKE_TEMPLATE)?
 
                    };
 

	
 
                    let progress_arg1 = self.apply_template_constraint(ctx, arg1_id, &ARRAYLIKE_TEMPLATE)?;
 
                    let progress_arg2 = self.apply_template_constraint(ctx, arg2_id, &ARRAYLIKE_TEMPLATE)?;
 

	
 
                    // If they're all arraylike, then we want the subtype to match
 
                    let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                        self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 1)?;
 

	
 
                    (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
                }
 
            },
 
            BO::LogicalAnd => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::LogicalOr => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => {
 
                // All equal of integer type
 
                let progress_base = self.apply_template_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
            BO::Equality | BO::Inequality => {
 
                // Equal2 on args, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::LessThan | BO::GreaterThan | BO::LessThanEqual | BO::GreaterThanEqual => {
 
                // Equal2 on args with numberlike type, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg_base = self.apply_template_constraint(ctx, arg1_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg_base || progress_arg1, progress_arg_base || progress_arg2)
 
            },
 
            BO::Add | BO::Subtract | BO::Multiply | BO::Divide => {
 
                // All equal of number type
 
                let progress_base = self.apply_template_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.debug_get_display_name(ctx, arg1_id));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.debug_get_display_name(ctx, arg2_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(ctx, arg1_id); }
 
        if progress_arg2 { self.queue_expr(ctx, arg2_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> Result<(), ParseError> {
 
        use UnaryOperator as UO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg_id = expr.expression;
 

	
 
        debug_log!("Unary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg  type: {}", self.debug_get_display_name(ctx, arg_id));
 
        debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        let (progress_expr, progress_arg) = match expr.operation {
 
            UO::Positive | UO::Negative => {
 
                // Equal types of numeric class
 
                let progress_base = self.apply_template_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::BitwiseNot => {
 
                // Equal types of integer class
 
                let progress_base = self.apply_template_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::LogicalNot => {
 
                // Both bools
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg = self.apply_forced_constraint(ctx, arg_id, &BOOL_TEMPLATE)?;
 
                (progress_expr, progress_arg)
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg  type [{}]: {}", progress_arg, self.debug_get_display_name(ctx, arg_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg { self.queue_expr(ctx, arg_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let index_id = expr.index;
 

	
 
        debug_log!("Indexing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.debug_get_display_name(ctx, subject_id));
 
        debug_log!("   - Index   type: {}", self.debug_get_display_name(ctx, index_id));
 
        debug_log!("   - Expr    type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        // Make sure subject is arraylike and index is integerlike
 
        let progress_subject_base = self.apply_template_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_index = self.apply_template_constraint(ctx, index_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
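        // (the part-index 1 for the subject presumably starts matching at its
        // element type, i.e. one part past the array-like constructor)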
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.debug_get_display_name(ctx, subject_id));
 
        debug_log!("   - Index   type [{}]: {}", progress_index, self.debug_get_display_name(ctx, index_id));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(ctx, subject_id); }
 
        if progress_index { self.queue_expr(ctx, index_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let from_id = expr.from_index;
 
        let to_id = expr.to_index;
 

	
 
        debug_log!("Slicing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.debug_get_display_name(ctx, subject_id));
 
        debug_log!("   - FromIdx type: {}", self.debug_get_display_name(ctx, from_id));
 
        debug_log!("   - ToIdx   type: {}", self.debug_get_display_name(ctx, to_id));
 
        debug_log!("   - Expr    type: {}", self.debug_get_display_name(ctx, upcast_id));
 

	
 
        // Make sure subject is arraylike and indices are of equal integerlike
 
        let progress_subject_base = self.apply_template_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_idx_base = self.apply_template_constraint(ctx, from_id, &INTEGERLIKE_TEMPLATE)?;
 
        let (progress_from, progress_to) = self.apply_equal2_constraint(ctx, upcast_id, from_id, 0, to_id, 0)?;
 

	
 
        let (progress_expr, progress_subject) = match self.type_is_certainly_or_certainly_not_string(ctx, subject_id) {
 
            (true, _) => {
 
                // Certainly a string
 
                (self.apply_forced_constraint(ctx, upcast_id, &STRING_TEMPLATE)?, false)
 
            },
 
            (_, true) => {
 
                // Certainly not a string
 
                let progress_expr_base = self.apply_template_constraint(ctx, upcast_id, &SLICE_TEMPLATE)?;
 
                let (progress_expr, progress_subject) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 1, subject_id, 1)?;
 

	
 
                (progress_expr_base || progress_expr, progress_subject)
 
            },
src/protocol/parser/pass_validation_linking.rs
 
@@ -1002,774 +1002,774 @@ impl Visitor for PassValidationLinking {
 
                        let literal = ctx.heap[id].value.as_struct();
 
                        let ast_definition = &ctx.heap[literal.definition];
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module().source, field_span, format!(
 
                                "This field does not exist on the struct '{}'",
 
                                ast_definition.identifier().value.as_str()
 
                            )
 
                        ));
 
                    }
 
                    field.field_idx = field_idx.unwrap();
 

	
 
                    // Check if specified more than once
 
                    if specified[field.field_idx] {
 
                        let field_span = field.identifier.span;
 
                        return Err(ParseError::new_error_str_at_span(
 
                            &ctx.module().source, field_span,
 
                            "This field is specified more than once"
 
                        ));
 
                    }
 

	
 
                    specified[field.field_idx] = true;
 
                }
 

	
 
                if !specified.iter().all(|v| *v) {
 
                    // Some fields were not specified
 
                    let mut not_specified = String::new();
 
                    let mut num_not_specified = 0;
 
                    for (def_field_idx, is_specified) in specified.iter().enumerate() {
 
                        if !is_specified {
 
                            if !not_specified.is_empty() { not_specified.push_str(", ") }
 
                            let field_ident = &struct_definition.fields[def_field_idx].identifier;
 
                            not_specified.push_str(field_ident.value.as_str());
 
                            num_not_specified += 1;
 
                        }
 
                    }
 

	
 
                    debug_assert!(num_not_specified > 0);
 
                    let msg = if num_not_specified == 1 {
 
                        format!("not all fields are specified, '{}' is missing", not_specified)
 
                    } else {
 
                        format!("not all fields are specified, [{}] are missing", not_specified)
 
                    };
 

	
 
                    let literal_span = literal.parser_type.full_span;
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, literal_span, msg
 
                    ));
 
                }
 

	
 
                // Need to traverse fields expressions in struct and evaluate
 
                // the poly args
 
                let mut expr_section = self.expression_buffer.start_section();
 
                for field in &literal.fields {
 
                    expr_section.push(field.value);
 
                }
 

	
 
                for expr_idx in 0..expr_section.len() {
 
                    let expr_id = expr_section[expr_idx];
 
                    self.expr_parent = ExpressionParent::Expression(upcast_id, expr_idx as u32);
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 

	
 
                expr_section.forget();
 
            },
 
            Literal::Enum(literal) => {
 
                // Make sure the variant exists
 
                let type_definition = ctx.types.get_base_definition(&literal.definition).unwrap();
 
                let enum_definition = type_definition.definition.as_enum();
 

	
 
                let variant_idx = enum_definition.variants.iter().position(|v| {
 
                    v.identifier == literal.variant
 
                });
 

	
 
                if variant_idx.is_none() {
 
                    let literal = ctx.heap[id].value.as_enum();
 
                    let ast_definition = ctx.heap[literal.definition].as_enum();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, literal.parser_type.full_span, format!(
 
                            "the variant '{}' does not exist on the enum '{}'",
 
                            literal.variant.value.as_str(), ast_definition.identifier.value.as_str()
 
                        )
 
                    ));
 
                }
 

	
 
                literal.variant_idx = variant_idx.unwrap();
 
            },
 
            Literal::Union(literal) => {
 
                // Make sure the variant exists
 
                let type_definition = ctx.types.get_base_definition(&literal.definition).unwrap();
 
                let union_definition = type_definition.definition.as_union();
 

	
 
                let variant_idx = union_definition.variants.iter().position(|v| {
 
                    v.identifier == literal.variant
 
                });
 
                if variant_idx.is_none() {
 
                    let literal = ctx.heap[id].value.as_union();
 
                    let ast_definition = ctx.heap[literal.definition].as_union();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, literal.parser_type.full_span, format!(
 
                            "the variant '{}' does not exist on the union '{}'",
 
                            literal.variant.value.as_str(), ast_definition.identifier.value.as_str()
 
                        )
 
                    ));
 
                }
 

	
 
                literal.variant_idx = variant_idx.unwrap();
 

	
 
                // Make sure the number of specified values matches the expected
 
                // number of embedded values in the union variant.
 
                let union_variant = &union_definition.variants[literal.variant_idx];
 
                if union_variant.embedded.len() != literal.values.len() {
 
                    let literal = ctx.heap[id].value.as_union();
 
                    let ast_definition = ctx.heap[literal.definition].as_union();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, literal.parser_type.full_span, format!(
 
                            "The variant '{}' of union '{}' expects {} embedded values, but {} were specified",
 
                            literal.variant.value.as_str(), ast_definition.identifier.value.as_str(),
 
                            union_variant.embedded.len(), literal.values.len()
 
                        ),
 
                    ))
 
                }
 

	
 
                // Traverse embedded values of union (if any) and evaluate the
 
                // polymorphic arguments
 
                let upcast_id = id.upcast();
 
                let mut expr_section = self.expression_buffer.start_section();
 
                for value in &literal.values {
 
                    expr_section.push(*value);
 
                }
 

	
 
                for expr_idx in 0..expr_section.len() {
 
                    let expr_id = expr_section[expr_idx];
 
                    self.expr_parent = ExpressionParent::Expression(upcast_id, expr_idx as u32);
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 

	
 
                expr_section.forget();
 
            },
 
            Literal::Array(literal) | Literal::Tuple(literal) => {
 
                // Visit all expressions in the array
 
                let upcast_id = id.upcast();
 
                let expr_section = self.expression_buffer.start_section_initialized(literal);
 
                for expr_idx in 0..expr_section.len() {
 
                    let expr_id = expr_section[expr_idx];
 
                    self.expr_parent = ExpressionParent::Expression(upcast_id, expr_idx as u32);
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 

	
 
                expr_section.forget();
 
            }
 
        }
 

	
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_cast_expr(&mut self, ctx: &mut Ctx, id: CastExpressionId) -> VisitorResult {
 
        let cast_expr = &mut ctx.heap[id];
 

	
 
        if let Some(span) = self.must_be_assignable {
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, span, "cannot assign to the result from a cast expression"
 
            ))
 
        }
 

	
 
        let upcast_id = id.upcast();
 
        let old_expr_parent = self.expr_parent;
 
        cast_expr.parent = old_expr_parent;
 
        cast_expr.unique_id_in_definition = self.next_expr_index;
 
        self.next_expr_index += 1;
 

	
 
        // Recurse into the thing that we're casting
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        let subject_id = cast_expr.subject;
 
        self.visit_expr(ctx, subject_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let call_expr = &ctx.heap[id];
 

	
 
        if let Some(span) = self.must_be_assignable {
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, span, "cannot assign to the result from a call expression"
 
            ))
 
        }
 

	
 
        // Check whether the method is allowed to be called within the code's
 
        // context (in sync, definition type, etc.)
 
        let mut expecting_wrapping_new_stmt = false;
 
        let mut expecting_primitive_def = false;
 
        let mut expecting_wrapping_sync_stmt = false;
 
        let mut expecting_no_select_stmt = false;
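        // Which of these restrictions apply depends on the method being
        // called; they are enforced after the match below.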
 

	
 
        match call_expr.method {
 
            Method::Get => {
 
                expecting_primitive_def = true;
 
                expecting_wrapping_sync_stmt = true;
 
                if !self.in_select_guard.is_invalid() {
 
                    // In a select guard. Take the argument (i.e. the port we're
 
                    // retrieving from) and add it to the list of involved ports
 
                    // of the guard
 
                    if call_expr.arguments.len() == 1 {
 
                        // We check the number of arguments later; for now,
                        // assume it is correct.
 
                        let argument = call_expr.arguments[0];
 
                        let select_stmt = &mut ctx.heap[self.in_select_guard];
 
                        let select_case = &mut select_stmt.cases[self.in_select_arm as usize];
 
                        select_case.involved_ports.push((id, argument));
 
                    }
 
                }
 
            },
 
            Method::Put => {
 
                expecting_primitive_def = true;
 
                expecting_wrapping_sync_stmt = true;
 
                expecting_no_select_stmt = true;
 
            },
 
            Method::Fires => {
 
                expecting_primitive_def = true;
 
                expecting_wrapping_sync_stmt = true;
 
            },
 
            Method::Create => {},
 
            Method::Length => {},
 
            Method::Assert => {
 
                expecting_wrapping_sync_stmt = true;
 
                expecting_no_select_stmt = true;
 
                if self.def_type.is_function() {
 
                    let call_span = call_expr.func_span;
 
                    return Err(ParseError::new_error_str_at_span(
 
                        &ctx.module().source, call_span,
 
                        "assert statement may only occur in components"
 
                    ));
 
                }
 
            },
 
            Method::Print => {},
 
            Method::SelectStart
 
            | Method::SelectRegisterCasePort
 
            | Method::SelectWait => unreachable!(), // not usable by programmer directly
 
            Method::UserFunction => {},
 
            Method::UserComponent => {
 
                expecting_wrapping_new_stmt = true;
 
            },
 
        }
 

	
 
        let call_expr = &mut ctx.heap[id];
 

	
 
        fn get_span_and_name<'a>(ctx: &'a Ctx, id: CallExpressionId) -> (InputSpan, String) {
 
            let call = &ctx.heap[id];
 
            let span = call.func_span;
 
            let name = String::from_utf8_lossy(ctx.module().source.section_at_span(span)).to_string();
 
            return (span, name);
 
        }
 
        if expecting_primitive_def {
 
            if !self.def_type.is_primitive() {
 
                let (call_span, func_name) = get_span_and_name(ctx, id);
 
                return Err(ParseError::new_error_at_span(
 
                    &ctx.module().source, call_span,
 
                    format!("a call to '{}' may only occur in primitive component definitions", func_name)
 
                ));
 
            }
 
        }
 

	
 
        if expecting_wrapping_sync_stmt {
 
            if self.in_sync.is_invalid() {
 
                let (call_span, func_name) = get_span_and_name(ctx, id);
 
                return Err(ParseError::new_error_at_span(
 
                    &ctx.module().source, call_span,
 
                    format!("a call to '{}' may only occur inside synchronous blocks", func_name)
 
                ))
 
            }
 
        }
 

	
 
        if expecting_no_select_stmt {
 
            if !self.in_select_guard.is_invalid() {
 
                let (call_span, func_name) = get_span_and_name(ctx, id);
 
                return Err(ParseError::new_error_at_span(
 
                    &ctx.module().source, call_span,
 
                    format!("a call to '{}' may not occur in a select statement's guard", func_name)
 
                ));
 
            }
 
        }
 

	
 
        if expecting_wrapping_new_stmt {
 
            if !self.expr_parent.is_new() {
 
                let call_span = call_expr.func_span;
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, call_span,
 
                    "cannot call a component, it can only be instantiated by using 'new'"
 
                ));
 
            }
 
        } else {
 
            if self.expr_parent.is_new() {
 
                let call_span = call_expr.func_span;
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, call_span,
 
                    "only components can be instantiated, this is a function"
 
                ));
 
            }
 
        }
 

	
 
        // Check the number of arguments
 
        let call_definition = ctx.types.get_base_definition(&call_expr.definition).unwrap();
 
        let num_expected_args = match &call_definition.definition {
 
            DefinedTypeVariant::Function(definition) => definition.arguments.len(),
 
            DefinedTypeVariant::Component(definition) => definition.arguments.len(),
 
            v => unreachable!("encountered {} type in call expression", v.type_class()),
 
        };
 

	
 
        let num_provided_args = call_expr.arguments.len();
 
        if num_provided_args != num_expected_args {
 
            let argument_text = if num_expected_args == 1 { "argument" } else { "arguments" };
 
            let call_span = call_expr.full_span;
 
            return Err(ParseError::new_error_at_span(
 
                &ctx.module().source, call_span, format!(
 
                    "expected {} {}, but {} were provided",
 
                    num_expected_args, argument_text, num_provided_args
 
                )
 
            ));
 
        }
 

	
 
        // Recurse into all of the arguments and set the expression's parent
 
        let upcast_id = id.upcast();
 

	
 
        let section = self.expression_buffer.start_section_initialized(&call_expr.arguments);
 
        let old_expr_parent = self.expr_parent;
 
        call_expr.parent = old_expr_parent;
 
        call_expr.unique_id_in_definition = self.next_expr_index;
 
        self.next_expr_index += 1;
 

	
 
        for arg_expr_idx in 0..section.len() {
 
            let arg_expr_id = section[arg_expr_idx];
 
            self.expr_parent = ExpressionParent::Expression(upcast_id, arg_expr_idx as u32);
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        section.forget();
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let var_expr = &ctx.heap[id];
 

	
 
        // Check if declaration was already resolved (this occurs for the
 
        // variable expr that is on the LHS of the assignment expr that is
 
        // associated with a variable declaration)
 
        let mut variable_id = var_expr.declaration;
 
        let mut is_binding_target = false;
 

	
 
        // Otherwise try to find it
 
        if variable_id.is_none() {
 
            variable_id = self.find_variable(ctx, self.relative_pos_in_parent, &var_expr.identifier);
 
        }
 

	
 
        // Otherwise, see whether it is a variable introduced by a binding expression
 
        let variable_id = if let Some(variable_id) = variable_id {
 
            variable_id
 
        } else {
 
            if self.in_binding_expr.is_invalid() || !self.in_binding_expr_lhs {
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, var_expr.identifier.span, "unresolved variable"
 
                ));
 
            }
 

	
 
            // This is a binding variable, but it may only appear in very
 
            // specific locations.
 
            let is_valid_binding = match self.expr_parent {
 
                ExpressionParent::Expression(expr_id, idx) => {
 
                    match &ctx.heap[expr_id] {
 
                        Expression::Binding(_binding_expr) => {
 
                            // Nested binding is disallowed, and because of
 
                            // the check above we know we're directly at the
 
                            // LHS of the binding expression
 
                            debug_assert_eq!(_binding_expr.this, self.in_binding_expr);
 
                            debug_assert_eq!(idx, 0);
 
                            true
 
                        }
 
                        Expression::Literal(lit_expr) => {
 
                            // Only structs, unions, arrays and tuples can
                            // have subexpressions, so we're always fine
                            dbg_code!({
                                match lit_expr.value {
                                    Literal::Struct(_) | Literal::Union(_) | Literal::Array(_) | Literal::Tuple(_) => {},
                                    _ => unreachable!(),
                                }
                            });
 

	
 
                            true
 
                        },
 
                        _ => false,
 
                    }
 
                },
 
                _ => {
 
                    false
 
                }
 
            };
 

	
 
            if !is_valid_binding {
 
                let binding_expr = &ctx.heap[self.in_binding_expr];
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, var_expr.identifier.span,
 
                    "illegal location for binding variable: binding variables may only be nested under a binding expression, or a struct, union or array literal"
 
                ).with_info_at_span(
 
                    &ctx.module().source, binding_expr.operator_span, format!(
 
                        "'{}' was interpreted as a binding variable because the variable is not declared and it is nested under this binding expression",
 
                        var_expr.identifier.value.as_str()
 
                    )
 
                ));
 
            }
 

	
 
            // By now we know that this is a valid binding expression. Given
 
            // that a binding expression must be nested under an if/while
 
            // statement, we now add the variable to the scope associated with
 
            // that statement.
 
            let bound_identifier = var_expr.identifier.clone();
 
            let bound_variable_id = ctx.heap.alloc_variable(|this| Variable {
 
                this,
 
                kind: VariableKind::Binding,
 
                parser_type: ParserType {
 
                    elements: vec![ParserTypeElement {
 
                        element_span: bound_identifier.span,
 
                        variant: ParserTypeVariant::Inferred
 
                    }],
 
                    full_span: bound_identifier.span
 
                },
 
                identifier: bound_identifier,
 
                relative_pos_in_parent: 0,
 
                unique_id_in_scope: -1,
 
            });
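            // Note: the binding variable is given an `Inferred` parser type
            // spanning its identifier; its concrete type is presumably filled
            // in by the later type inference pass.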
 

	
 
            let scope_id = match &ctx.heap[self.in_test_expr] {
 
                Statement::If(stmt) => stmt.true_case.scope,
 
                Statement::While(stmt) => stmt.scope,
 
                _ => unreachable!(),
 
            };
 

	
 
            self.checked_at_single_scope_add_local(ctx, scope_id, -1, bound_variable_id)?; // add at -1 such that first statement can find the variable if needed
 

	
 
            is_binding_target = true;
 
            bound_variable_id
 
        };
 

	
 
        let var_expr = &mut ctx.heap[id];
 
        var_expr.declaration = Some(variable_id);
 
        var_expr.used_as_binding_target = is_binding_target;
 
        var_expr.parent = self.expr_parent;
 
        var_expr.unique_id_in_definition = self.next_expr_index;
 
        self.next_expr_index += 1;
 

	
 
        Ok(())
 
    }
 
}
 

	
 
impl PassValidationLinking {
 
    //--------------------------------------------------------------------------
 
    // Special traversal
 
    //--------------------------------------------------------------------------
 

	
 
    /// Pushes a new scope associated with a particular statement. If that
 
    /// statement already has an associated scope (e.g. the scope associated

    /// with a sync statement or a select statement's arm) then we won't do

    /// anything. In all cases the caller must call `pop_scope` with the scope

    /// and relative scope position returned by this function.
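    ///
    /// Usage sketch (illustrative only; `some_scope_id` stands in for the
    /// scope stored on whichever statement is being visited):
    ///
    /// ```ignore
    /// let old = self.push_scope(ctx, false, some_scope_id);
    /// // ... traverse the contents of the statement ...
    /// self.pop_scope(old);
    /// ```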
 
    fn push_scope(&mut self, ctx: &mut Ctx, is_top_level_scope: bool, pushed_scope_id: ScopeId) -> (ScopeId, i32) {
 
        // Set the properties of the pushed scope (it is already created during
 
        // AST construction, but most values are not yet set to their correct
 
        // values)
 
        let old_scope_id = self.cur_scope;
 

	
 
        let scope = &mut ctx.heap[pushed_scope_id];
 
        if !is_top_level_scope {
 
            scope.parent = Some(old_scope_id);
 
        }
 

	
 
        scope.relative_pos_in_parent = self.relative_pos_in_parent;
 
        let old_relative_pos = self.relative_pos_in_parent;
 
        self.relative_pos_in_parent = 0;
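        // (children of the pushed scope start counting their relative
        // position from zero again)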
 

	
 
        // Link up scopes
 
        if !is_top_level_scope {
 
            let old_scope = &mut ctx.heap[old_scope_id];
 
            old_scope.nested.push(pushed_scope_id);
 
        }
 

	
 
        // Set as current traversal scope, then return old scope
 
        self.cur_scope = pushed_scope_id;
 
        return (old_scope_id, old_relative_pos);
 
    }
 

	
 
    fn pop_scope(&mut self, scope_to_restore: (ScopeId, i32)) {
 
        self.cur_scope = scope_to_restore.0;
 
        self.relative_pos_in_parent = scope_to_restore.1;
 
    }
 

	
 
    fn resolve_pending_control_flow_targets(&mut self, ctx: &mut Ctx) -> Result<(), ParseError> {
 
        for entry in &self.control_flow_stmts {
 
            let stmt = &ctx.heap[entry.statement];
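            // Each entry is a break, continue or goto whose target could not
            // be resolved during the main traversal; now that all scopes and
            // labels are known the targets can be linked up.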
 

	
 
            match stmt {
 
                Statement::Break(stmt) => {
 
                    let stmt_id = stmt.this;
 
                    let target_while_id = Self::resolve_break_or_continue_target(ctx, entry, stmt.span, &stmt.label)?;
 
                    let target_while_stmt = &ctx.heap[target_while_id];
 
                    let target_end_while_id = target_while_stmt.end_while;
 
                    debug_assert!(!target_end_while_id.is_invalid());
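                    // A break targets the end-while statement of the loop,
                    // whereas a continue (handled below) targets the while
                    // statement itself.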
 

	
 
                    let break_stmt = &mut ctx.heap[stmt_id];
 
                    break_stmt.target = target_end_while_id;
 
                },
 
                Statement::Continue(stmt) => {
 
                    let stmt_id = stmt.this;
 
                    let target_while_id = Self::resolve_break_or_continue_target(ctx, entry, stmt.span, &stmt.label)?;
 

	
 
                    let continue_stmt = &mut ctx.heap[stmt_id];
 
                    continue_stmt.target = target_while_id;
 
                },
 
                Statement::Goto(stmt) => {
 
                    let stmt_id = stmt.this;
 
                    let target_id = Self::find_label(entry.in_scope, ctx, &stmt.label)?;
 
                    let target_stmt = &ctx.heap[target_id];
 
                    if entry.in_sync != target_stmt.in_sync {
 
                        // Nested sync not allowed. And goto can only go to
 
                        // outer scopes, so we must be escaping from a sync.
 
                        debug_assert!(target_stmt.in_sync.is_invalid());    // target not in sync
 
                        debug_assert!(!entry.in_sync.is_invalid()); // but the goto is in sync
 
                        let goto_stmt = &ctx.heap[stmt_id];
 
                        let sync_stmt = &ctx.heap[entry.in_sync];
 
                        return Err(
 
                            ParseError::new_error_str_at_span(&ctx.module().source, goto_stmt.span, "goto may not escape the surrounding synchronous block")
 
                            .with_info_str_at_span(&ctx.module().source, target_stmt.label.span, "this is the target of the goto statement")
 
                            .with_info_str_at_span(&ctx.module().source, sync_stmt.span, "which will jump past this statement")
 
                        );
 
                    }
 

	
 
                    let goto_stmt = &mut ctx.heap[stmt_id];
 
                    goto_stmt.target = target_id;
 
                },
 
                _ => unreachable!("cannot resolve control flow target for {:?}", stmt),
 
            }
 
        }
 

	
 
        return Ok(());
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Utilities
 
    //--------------------------------------------------------------------------
 

	
 
    /// Adds a local variable to the specified scope, checking all parent

    /// scopes for name conflicts. It will also annotate the variable in the

    /// AST with its relative position within that scope.
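    ///
    /// For example: declaring `x` in a nested scope while an `x` from an
    /// enclosing scope is already visible at that point is reported as a
    /// conflict below, i.e. shadowing is not permitted.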
 
    fn checked_add_local(&mut self, ctx: &mut Ctx, target_scope_id: ScopeId, target_relative_pos: i32, new_variable_id: VariableId) -> Result<(), ParseError> {
 
        let new_variable = &ctx.heap[new_variable_id];
 

	
 
        // We immediately go to the parent scope. We check the target scope
 
        // in the call at the end. That is also where we check for collisions
 
        // with symbols.
 
        let mut scope = &ctx.heap[target_scope_id];
 
        let mut cur_relative_pos = scope.relative_pos_in_parent;
 
        while let Some(scope_parent_id) = scope.parent {
 
            scope = &ctx.heap[scope_parent_id];
 

	
 
            // Check for collisions
 
            for variable_id in scope.variables.iter().copied() {
 
                let existing_variable = &ctx.heap[variable_id];
 
                if existing_variable.identifier == new_variable.identifier &&
 
                    existing_variable.this != new_variable_id &&
 
                    cur_relative_pos >= existing_variable.relative_pos_in_parent {
 
                    return Err(
 
                        ParseError::new_error_str_at_span(
 
                            &ctx.module().source, new_variable.identifier.span, "Local variable name conflicts with another variable"
 
                        ).with_info_str_at_span(
 
                            &ctx.module().source, existing_variable.identifier.span, "Previous variable is found here"
 
                        )
 
                    );
 
                }
 
            }
 

	
 
            cur_relative_pos = scope.relative_pos_in_parent;
 
        }
 

	
 
        // No collisions in any of the parent scopes, attempt to add to the target scope
 
        self.checked_at_single_scope_add_local(ctx, target_scope_id, target_relative_pos, new_variable_id)
 
    }
 

	
 
    /// Adds a local variable to the specified scope. Will check the specified
 
    /// scope for variable conflicts and the symbol table for global conflicts.
 
    /// Will NOT check parent scopes of the specified scope.
 
    fn checked_at_single_scope_add_local(
 
        &mut self, ctx: &mut Ctx, scope_id: ScopeId, relative_pos: i32, new_variable_id: VariableId
 
    ) -> Result<(), ParseError> {
 
        // Check the symbol table for conflicts
 
        {
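            // A local variable may not have the same name as a symbol (e.g. a
            // type or other definition) that is visible from this definition.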
 
            let cur_scope = SymbolScope::Definition(self.def_type.definition_id());
 
            let ident = &ctx.heap[new_variable_id].identifier;
 
            if let Some(symbol) = ctx.symbols.get_symbol_by_name(cur_scope, &ident.value.as_bytes()) {
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, ident.span,
 
                    "local variable declaration conflicts with symbol"
 
                ).with_info_str_at_span(
 
                    &ctx.module().source, symbol.variant.span_of_introduction(&ctx.heap), "the conflicting symbol is introduced here"
 
                ));
 
            }
 
        }
 

	
 
        // Check the specified scope for conflicts
 
        let new_variable = &ctx.heap[new_variable_id];
 
        let scope = &ctx.heap[scope_id];
 

	
 
        for variable_id in scope.variables.iter().copied() {
 
            let old_variable = &ctx.heap[variable_id];
 
            if new_variable.this != old_variable.this &&
 
                // relative_pos >= other_local.relative_pos_in_block &&
 
                new_variable.identifier == old_variable.identifier {
 
                // Collision
 
                return Err(
 
                    ParseError::new_error_str_at_span(
 
                        &ctx.module().source, new_variable.identifier.span, "Local variable name conflicts with another variable"
 
                    ).with_info_str_at_span(
 
                        &ctx.module().source, old_variable.identifier.span, "Previous variable is found here"
 
                    )
 
                );
 
            }
 
        }
 

	
 
        // No collisions
 
        let scope = &mut ctx.heap[scope_id];
 
        scope.variables.push(new_variable_id);
 

	
 
        let variable = &mut ctx.heap[new_variable_id];
 
        variable.relative_pos_in_parent = relative_pos;
 

	
 
        Ok(())
 
    }
 

	
 
    /// Finds a variable by name, starting in the visitor's current scope and

    /// walking up through parent scopes. In each scope only variables declared

    /// before the relevant relative position are considered.
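    ///
    /// Illustration: a use of `x` at relative position 3 only matches an `x`
    /// declared at a smaller relative position in the current scope; if no
    /// such declaration exists the search continues in the parent scope,
    /// using the child scope's own relative position as the new cutoff.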
 
    fn find_variable(&self, ctx: &Ctx, mut relative_pos: i32, identifier: &Identifier) -> Option<VariableId> {
 
        let mut scope_id = self.cur_scope;
 

	
 
        loop {
 
            // Check if we can find the variable in the current scope
 
            let scope = &ctx.heap[scope_id];
 
            
 
            for variable_id in scope.variables.iter().copied() {
 
                let variable = &ctx.heap[variable_id];
 
                
 
                if variable.relative_pos_in_parent < relative_pos && identifier == &variable.identifier {
 
                    return Some(variable_id);
 
                }
 
            }
 

	
 
            // Could not find variable, move to parent scope and try again
 
            if scope.parent.is_none() {
 
                return None;
 
            }
 

	
 
            scope_id = scope.parent.unwrap();
 
            relative_pos = scope.relative_pos_in_parent;
 
        }
 
    }
 

	
 
    /// Adds a particular label to the current scope. Will return an error if
 
    /// there is another label with the same name in the current scope or any of its parent scopes.
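    /// The label also records the synchronous statement it is nested under,
    /// which is used later when resolving goto targets.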
 
    fn checked_add_label(&mut self, ctx: &mut Ctx, relative_pos: i32, in_sync: SynchronousStatementId, new_label_id: LabeledStatementId) -> Result<(), ParseError> {
 
        // Make sure label is not defined within the current scope or any of the
 
        // parent scopes.
 
        let new_label = &mut ctx.heap[new_label_id];
 
        new_label.relative_pos_in_parent = relative_pos;
 
        new_label.in_sync = in_sync;
 

	
 
        let new_label = &ctx.heap[new_label_id];
 
        let mut scope_id = self.cur_scope;
 

	
 
        loop {
 
            let scope = &ctx.heap[scope_id];
 
            for existing_label_id in scope.labels.iter().copied() {
 
                let existing_label = &ctx.heap[existing_label_id];
 
                if existing_label.label == new_label.label {
 
                    // Collision
 
                    return Err(ParseError::new_error_str_at_span(
 
                        &ctx.module().source, new_label.label.span, "label name is used more than once"
 
                    ).with_info_str_at_span(
 
                        &ctx.module().source, existing_label.label.span, "the other label is found here"
 
                    ));
 
                }
 
            }
 

	
 
            if scope.parent.is_none() {
 
                break;
 
            }
 

	
 
            scope_id = scope.parent.unwrap();
 
        }
 

	
 
        // No collisions
 
        let scope = &mut ctx.heap[self.cur_scope];
 
        scope.labels.push(new_label_id);
 

	
 
        Ok(())
 
    }
 

	
 
    /// Finds a particular labeled statement by its identifier. Once found it
 
    /// will make sure that the target label does not skip over any variable
 
    /// declarations within the scope in which the label was found.
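    ///
    /// For example: a goto that jumps forward past a variable declaration in
    /// the label's scope would leave that variable uninitialized at the label,
    /// so such a jump is rejected here (see also the TODO in the body).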
 
    fn find_label(mut scope_id: ScopeId, ctx: &Ctx, identifier: &Identifier) -> Result<LabeledStatementId, ParseError> {
 
        loop {
 
            let scope = &ctx.heap[scope_id];
 
            let relative_scope_pos = scope.relative_pos_in_parent;
 

	
 
            for label_id in scope.labels.iter().copied() {
 
                let label = &ctx.heap[label_id];
 
                if label.label == *identifier {
 
                    // Found the target label, now make sure that the jump to
 
                    // the label doesn't imply a skipped variable declaration
 
                    for variable_id in scope.variables.iter().copied() {
 
                        // TODO: Better to do this in control flow analysis, it
 
                        //  is legal to skip over a variable declaration if it
 
                        //  is not actually being used.
 
                        let variable = &ctx.heap[variable_id];
 
                        if variable.relative_pos_in_parent > relative_scope_pos && variable.relative_pos_in_parent < label.relative_pos_in_parent {
 
                            return Err(
 
                                ParseError::new_error_str_at_span(&ctx.module().source, identifier.span, "this target label skips over a variable declaration")
 
                                .with_info_str_at_span(&ctx.module().source, label.label.span, "because it jumps to this label")
 
                                .with_info_str_at_span(&ctx.module().source, variable.identifier.span, "which skips over this variable")
 
                            );
 
                        }
 
                    }
 
                    return Ok(label_id);
 
                }
 
            }
 

	
 
            if scope.parent.is_none() {
 
                return Err(ParseError::new_error_str_at_span(
 
                    &ctx.module().source, identifier.span, "could not find this label"
 
                ));
 
            }
 

	
 
            scope_id = scope.parent.unwrap();
 
        }
 
    }
 

	
 
    /// This function checks whether the provided scope, or any of its parent

    /// scopes, is the scope that belongs to the expected while statement.
 
    fn scope_is_nested_in_while_statement(mut scope_id: ScopeId, ctx: &Ctx, expected_while_id: WhileStatementId) -> bool {
 
        let while_stmt = &ctx.heap[expected_while_id];
 

	
 
        loop {
 
            let scope = &ctx.heap[scope_id];
 
            if scope.this == while_stmt.scope {
 
                return true;
 
            }
 

	
 
            match scope.parent {
 
                Some(new_scope_id) => scope_id = new_scope_id,
 
                None => return false, // walked all the way up, not encountering the while statement
 
            }
 
        }
 
    }
 

	
 
    /// This function should be called while dealing with break/continue
 
    /// statements. It will try to find the targeted while statement, using the
 
    /// target label if provided. If a valid target is found then the loop's
 
    /// ID will be returned, otherwise a parsing error is constructed.
 
    /// The provided input position should be the position of the break/continue
 
    /// statement.
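    /// When a label is provided the target is looked up through `find_label`
    /// on the scope recorded in the control flow entry.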
 
    fn resolve_break_or_continue_target(ctx: &Ctx, control_flow: &ControlFlowStatement, span: InputSpan, label: &Option<Identifier>) -> Result<WhileStatementId, ParseError> {
 
        let target = match label {
 
            Some(label) => {
 
                let target_id = Self::find_label(control_flow.in_scope, ctx, label)?;
 

	

Changeset was too big and was cut off...
