Changeset - 8294209da734
MH <contact@maxhenger.nl> - 2021-05-21 15:32:11
Add test for struct field initialization/modification
6 files changed with 116 insertions and 33 deletions:
src/collections/scoped_buffer.rs
 
/// scoped_buffer.rs
 
///
 
/// Solves the common pattern where we are performing some kind of recursive
 
/// procedure while using a temporary buffer. At the start, or during the
 
/// procedure, we push stuff into the buffer. At the end we take out what we
 
/// have put in.
 
///
 
/// It is unsafe because we're using pointers to circumvent borrowing rules in
 
/// the name of code cleanliness. The correctness of use is checked in debug
 
/// mode.
 

	
 
use std::iter::FromIterator;
 

	
 
pub(crate) struct ScopedBuffer<T: Sized> {
 
    pub inner: Vec<T>,
 
}
 

	
 
/// A section of the buffer. Keeps track of where we started the section. When
 
/// done with the section one must call `into_vec` or `forget` to remove the
 
/// section from the underlying buffer. This will also be done upon dropping the
 
/// ScopedSection in case errors are being handled.
 
pub(crate) struct ScopedSection<T: Sized> {
 
    inner: *mut Vec<T>,
 
    start_size: u32,
 
    #[cfg(debug_assertions)] cur_size: u32,
 
}
 

	
 
impl<T: Sized> ScopedBuffer<T> {
 
    pub(crate) fn new_reserved(capacity: usize) -> Self {
 
        Self { inner: Vec::with_capacity(capacity) }
 
    }
 

	
 
    pub(crate) fn start_section(&mut self) -> ScopedSection<T> {
 
        let start_size = self.inner.len() as u32;
 
        ScopedSection {
 
            inner: &mut self.inner,
 
            start_size,
 
            cur_size: start_size
 
            #[cfg(debug_assertions)] cur_size: start_size
 
        }
 
    }
 
}
 

	
 
impl<T: Clone> ScopedBuffer<T> {
 
    pub(crate) fn start_section_initialized(&mut self, initialize_with: &[T]) -> ScopedSection<T> {
 
        let start_size = self.inner.len() as u32;
 
        let data_size = initialize_with.len() as u32;
 
        self.inner.extend_from_slice(initialize_with);
 
        ScopedSection{
 
            inner: &mut self.inner,
 
            start_size,
 
            cur_size: start_size + data_size,
 
            #[cfg(debug_assertions)] cur_size: start_size + data_size,
 
        }
 
    }
 
}
 

	
 
#[cfg(debug_assertions)]
 
impl<T: Sized> Drop for ScopedBuffer<T> {
 
    fn drop(&mut self) {
 
        // Make sure that everyone cleaned up the buffer neatly
 
        debug_assert!(self.inner.is_empty(), "dropped non-empty scoped buffer");
 
    }
 
}
 

	
 
impl<T: Sized> ScopedSection<T> {
 
    #[inline]
 
    pub(crate) fn push(&mut self, value: T) {
 
        let vec = unsafe{&mut *self.inner};
 
        debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to push onto section, but size is larger than expected");
 
        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to push onto section, but size is larger than expected");
 
        vec.push(value);
 
        if cfg!(debug_assertions) { self.cur_size += 1; }
 
        #[cfg(debug_assertions)] { self.cur_size += 1; }
 
    }
 

	
 
    pub(crate) fn len(&self) -> usize {
 
        let vec = unsafe{&mut *self.inner};
 
        debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to get section length, but size is larger than expected");
 
        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to get section length, but size is larger than expected");
 
        return vec.len() - self.start_size as usize;
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn forget(mut self) {
 
        let vec = unsafe{&mut *self.inner};
 
        if cfg!(debug_assertions) {
 
        #[cfg(debug_assertions)] {
 
            debug_assert_eq!(
 
                vec.len(), self.cur_size as usize,
 
                "trying to forget section, but size is larger than expected"
 
            );
 
            self.cur_size = self.start_size;
 
        }
 
        vec.truncate(self.start_size as usize);
 
    }
 

	
 
    #[inline]
 
    pub(crate) fn into_vec(mut self) -> Vec<T> {
 
        let vec = unsafe{&mut *self.inner};
 
        if cfg!(debug_assertions) {
 
        #[cfg(debug_assertions)]  {
 
            debug_assert_eq!(
 
                vec.len(), self.cur_size as usize,
 
                "trying to turn section into vec, but size is larger than expected"
 
            );
 
            self.cur_size = self.start_size;
 
        }
 
        let section = Vec::from_iter(vec.drain(self.start_size as usize..));
 
        section
 
    }
 
}
 

	
 
impl<T: Sized> std::ops::Index<usize> for ScopedSection<T> {
 
    type Output = T;
 

	
 
    fn index(&self, idx: usize) -> &Self::Output {
 
        let vec = unsafe{&*self.inner};
 
        return &vec[self.start_size as usize + idx]
 
    }
 
}
 

	
 
#[cfg(debug_assertions)]
 
impl<T: Sized> Drop for ScopedSection<T> {
 
    fn drop(&mut self) {
 
        let vec = unsafe{&mut *self.inner};
 
        debug_assert_eq!(vec.len(), self.cur_size as usize);
 
        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize);
 
        vec.truncate(self.start_size as usize);
 
    }
 
}
 
\ No newline at end of file
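A minimal usage sketch of the buffer/section API above (hypothetical caller, not part of this changeset); the debug-mode assertions verify exactly this discipline of draining every section before the buffer is dropped:

fn collect_small_ids(buffer: &mut ScopedBuffer<u32>) -> Vec<u32> {
    // Start a section; recursive calls may start their own sections on the same
    // buffer, as long as they call into_vec()/forget() before this one finishes.
    let mut section = buffer.start_section();
    section.push(1);
    section.push(2);
    // Drains only the values pushed since start_section(), leaving any enclosing
    // section of the buffer untouched.
    section.into_vec()
}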
src/protocol/eval/executor.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::*;
 
use super::store::*;
 
use super::error::*;
 
use crate::protocol::*;
 
use crate::protocol::ast::*;
 
use crate::protocol::type_table::*;
 

	
 
macro_rules! debug_enabled { () => { true }; }
 
macro_rules! debug_enabled { () => { false }; }
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "exec", $format);
 
        enabled_debug_print!(false, "exec", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "exec", $format, $($args),*);
 
        enabled_debug_print!(false, "exec", $format, $($args),*);
 
    };
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) enum ExprInstruction {
 
    EvalExpr(ExpressionId),
 
    PushValToFront,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Frame {
 
    pub(crate) definition: DefinitionId,
 
    pub(crate) monomorph_idx: i32,
 
    pub(crate) position: StatementId,
 
    pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
}
 

	
 
impl Frame {
 
    /// Creates a new execution frame. Does not modify the stack in any way.
 
    pub fn new(heap: &Heap, definition_id: DefinitionId, monomorph_idx: i32) -> Self {
 
        let definition = &heap[definition_id];
 
        let first_statement = match definition {
 
            Definition::Component(definition) => definition.body,
 
            Definition::Function(definition) => definition.body,
 
            _ => unreachable!("initializing frame with {:?} instead of a function/component", definition),
 
        };
 

	
 
        Frame{
 
            definition: definition_id,
 
            monomorph_idx,
 
            position: first_statement.upcast(),
 
            expr_stack: VecDeque::with_capacity(128),
 
            expr_values: VecDeque::with_capacity(128),
 
        }
 
    }
 

	
 
    /// Prepares a single expression for execution. This involves walking the
 
    /// expression tree and putting its nodes in the `expr_stack` such that
 
    /// continuously popping from its back will evaluate the expression. The
 
    /// results of each expression will be stored by pushing onto `expr_values`.
 
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear(); // May not be empty if last expression result(s) were discarded
 

	
 
        self.serialize_expression(heap, expr_id);
 
    }
 

	
 
    /// Prepares multiple expressions for execution (i.e. evaluating all
 
    /// function arguments or all elements of an array/union literal). Per
 
    /// expression this works the same as `prepare_single_expression`. However,
 
    /// after each expression is evaluated we insert a `PushValToFront`
 
    /// instruction to rotate its result to the front of `expr_values`.
 
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear();
 

	
 
        for expr_id in expr_ids {
 
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
            self.serialize_expression(heap, *expr_id);
 
        }
 
    }
 

	
 
    /// Performs a depth-first serialization of the expression tree. Let's not care
 
    /// about performance for a temporary runtime implementation
 
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
 
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));
 

	
 
        match &heap[id] {
 
            Expression::Assignment(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Binding(expr) => {
 
                todo!("implement binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                self.serialize_expression(heap, expr.test);
 
            },
 
            Expression::Binary(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Unary(expr) => {
 
                self.serialize_expression(heap, expr.expression);
 
            },
 
            Expression::Indexing(expr) => {
 
                self.serialize_expression(heap, expr.index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Slicing(expr) => {
 
                self.serialize_expression(heap, expr.from_index);
 
                self.serialize_expression(heap, expr.to_index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Select(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Literal(expr) => {
 
                // Here we only care about literals that have subexpressions
 
                match &expr.value {
 
                    Literal::Null | Literal::True | Literal::False |
 
                    Literal::Character(_) | Literal::String(_) |
 
                    Literal::Integer(_) | Literal::Enum(_) => {
 
                        // No subexpressions
 
                    },
 
                    Literal::Struct(literal) => {
 
                        for field in &literal.fields {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, field.value);
 
                        // Note: field expressions are evaluated in programmer-
 
                        // specified order. But struct construction expects them
 
                        // in type-defined order. I might want to come back to
 
                        // this.
 
                        let mut _num_pushed = 0;
 
                        for want_field_idx in 0..literal.fields.len() {
 
                            for field in &literal.fields {
 
                                if field.field_idx == want_field_idx {
 
                                    _num_pushed += 1;
 
                                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                                    self.serialize_expression(heap, field.value);
 
                                }
 
                            }
 
                        }
 
                        debug_assert_eq!(_num_pushed, literal.fields.len())
 
                    },
 
                    Literal::Union(literal) => {
 
                        for value_expr_id in &literal.values {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Array(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    }
 
                }
 
            },
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
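// Illustrative walk-through of the serialization order (descriptive note, not part of
// this changeset): serializing a call `f(a, b)` produces, front-to-back,
//   expr_stack = [EvalExpr(f(a, b)), PushValToFront, EvalExpr(a), PushValToFront, EvalExpr(b)]
// The executor pops from the back: it evaluates `b`, rotates it to the front of
// `expr_values`, then evaluates `a` and rotates it to the front as well, so the call
// finally sees `expr_values` holding [a, b] in argument order. The struct literal case
// above walks fields by `want_field_idx`, so the evaluated field values likewise end up
// in type-defined order, independent of the order in which the programmer wrote them.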
 

	
 
type EvalResult = Result<EvalContinuation, EvalError>;
 

	
 
pub enum EvalContinuation {
 
    Stepping,
 
    Inconsistent,
 
    Terminal,
 
    SyncBlockStart,
 
    SyncBlockEnd,
 
    NewComponent(DefinitionId, i32, ValueGroup),
 
    BlockFires(Value),
 
    BlockGet(Value),
 
    Put(Value, Value),
 
}
 

	
 
// Note: cloning is fine, methinks. If we clone all values and the heap regions, then
 
// we end up with valid "pointers" to heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
    pub fn new(_types: &TypeTable, heap: &Heap, def: DefinitionId, monomorph_idx: i32, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        // Maybe do typechecking in the future?
 
        debug_assert!((monomorph_idx as usize) < _types.get_base_definition(&def).unwrap().definition.procedure_monomorphs().len());
 
        prompt.frames.push(Frame::new(heap, def, monomorph_idx));
 
        args.into_store(&mut prompt.store);
 

	
 
        prompt
 
    }
 

	
 
    pub(crate) fn step(&mut self, types: &TypeTable, heap: &Heap, modules: &[Module], ctx: &mut EvalContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the transformation first (because Rust...)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
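        // Why the two loops above are split (descriptive note, not in the original
        // source): the second loop holds a mutable borrow of
        // `store.heap_regions[heap_pos].values`, while `read_take_ownership` is a call
        // on `store` itself. Doing the ownership transfer up front avoids the two
        // borrows of `store` overlapping, which is what the "because Rust..." remark
        // above refers to.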
 

	
 
        // Helper function to make sure that an index into an array is valid.
 
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx >= array_len as i64;
 
        }
 

	
 
        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx > array_len as i64;
 
        }
 

	
 
        fn construct_array_error(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, heap_pos: u32, idx: i64) -> EvalError {
 
            let array_len = prompt.store.heap_regions[heap_pos as usize].values.len();
 
            return EvalError::new_error_at_expr(
 
                prompt, modules, heap, expr_id,
 
                format!("index {} is out of bounds: array length is {}", idx, array_len)
 
            )
 
        }
 

	
 
        // Checking if we're at the end of execution
 
        let cur_frame = self.frames.last_mut().unwrap();
 
        if cur_frame.position.is_invalid() {
 
            if heap[cur_frame.definition].is_function() {
 
                todo!("End of function without return, return an evaluation error");
 
            }
 
            return Ok(EvalContinuation::Terminal);
 
        }
 

	
 
        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier().value.as_str());
 

	
 
        // Execute all pending expressions
 
        while !cur_frame.expr_stack.is_empty() {
 
            let next = cur_frame.expr_stack.pop_back().unwrap();
 
            debug_log!("Expr stack: {:?}", next);
 
            match next {
 
                ExprInstruction::PushValToFront => {
 
                    cur_frame.expr_values.rotate_right(1);
 
                },
 
                ExprInstruction::EvalExpr(expr_id) => {
 
                    let expr = &heap[expr_id];
 
                    match expr {
 
                        Expression::Assignment(expr) => {
 
                            let to = cur_frame.expr_values.pop_back().unwrap().as_ref();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            // Note: although not pretty, the assignment operator takes ownership
 
                            // of the right-hand side value when possible. So we do not drop the
 
                            // rhs's optionally owned heap data.
 
                            let rhs = self.store.read_take_ownership(rhs);
 
                            apply_assignment_operator(&mut self.store, to, expr.operation, rhs);
 
                        },
 
                        Expression::Binding(_expr) => {
 
                            todo!("Binding expression");
 
                        },
 
                        Expression::Conditional(expr) => {
 
                            // Evaluate testing expression, then extend the
 
                            // expression stack with the appropriate expression
 
                            let test_result = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                            if test_result {
 
                                cur_frame.serialize_expression(heap, expr.true_expression);
 
                            } else {
 
                                cur_frame.serialize_expression(heap, expr.false_expression);
 
                            }
 
                        },
 
                        Expression::Binary(expr) => {
 
                            let lhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_binary_operator(&mut self.store, &lhs, expr.operation, &rhs);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(lhs.get_heap_pos());
 
                            self.store.drop_value(rhs.get_heap_pos());
 
                        },
 
                        Expression::Unary(expr) => {
 
                            let val = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_unary_operator(&mut self.store, expr.operation, &val);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(val.get_heap_pos());
 
                        },
 
                        Expression::Indexing(expr) => {
 
                            // Evaluate index. Never heap allocated so we do
 
                            // not have to drop it.
 
                            let index = cur_frame.expr_values.pop_back().unwrap();
 
                            let index = self.store.maybe_read_ref(&index);
 

	
 
                            debug_assert!(index.is_integer());
 
                            let index = if index.is_signed_integer() {
 
                                index.as_signed_integer() as i64
 
                            } else {
 
                                index.as_unsigned_integer() as i64
 
                            };
 

	
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            let (deallocate_heap_pos, value_to_push, subject_heap_pos) = match subject {
 
                                Value::Ref(value_ref) => {
 
                                    // Our expression stack value is a reference to something that
 
                                    // exists in the normal stack/heap. We don't want to deallocate
 
                                    // this thing. Rather we want to return a reference to it.
 
                                    let subject = self.store.read_ref(value_ref);
 
                                    let subject_heap_pos = subject.as_array();
 

	
 
                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
 
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
 
                                    }
 

	
src/protocol/parser/mod.rs
 
@@ -38,277 +38,277 @@ pub enum ModuleCompilationPhase {
 
    DefinitionsParsed,      // produced the AST for the entire module
 
    TypesAddedToTable,      // added all definitions to the type table
 
    ValidatedAndLinked,     // AST is traversed and has linked the required AST nodes
 
    Typed,                  // Type inference and checking has been performed
 
}
 

	
 
pub struct Module {
 
    // Buffers
 
    pub source: InputSource,
 
    pub tokens: TokenBuffer,
 
    // Identifiers
 
    pub root_id: RootId,
 
    pub name: Option<(PragmaId, StringRef<'static>)>,
 
    pub version: Option<(PragmaId, i64)>,
 
    pub phase: ModuleCompilationPhase,
 
}
 

	
 
pub struct PassCtx<'a> {
 
    heap: &'a mut Heap,
 
    symbols: &'a mut SymbolTable,
 
    pool: &'a mut StringPool,
 
}
 

	
 
pub struct Parser {
 
    pub(crate) heap: Heap,
 
    pub(crate) string_pool: StringPool,
 
    pub(crate) modules: Vec<Module>,
 
    pub(crate) symbol_table: SymbolTable,
 
    pub(crate) type_table: TypeTable,
 
    // Compiler passes
 
    pass_tokenizer: PassTokenizer,
 
    pass_symbols: PassSymbols,
 
    pass_import: PassImport,
 
    pass_definitions: PassDefinitions,
 
    pass_validation: PassValidationLinking,
 
    pass_typing: PassTyping,
 
}
 

	
 
impl Parser {
 
    pub fn new() -> Self {
 
        let mut parser = Parser{
 
            heap: Heap::new(),
 
            string_pool: StringPool::new(),
 
            modules: Vec::new(),
 
            symbol_table: SymbolTable::new(),
 
            type_table: TypeTable::new(),
 
            pass_tokenizer: PassTokenizer::new(),
 
            pass_symbols: PassSymbols::new(),
 
            pass_import: PassImport::new(),
 
            pass_definitions: PassDefinitions::new(),
 
            pass_validation: PassValidationLinking::new(),
 
            pass_typing: PassTyping::new(),
 
        };
 

	
 
        parser.symbol_table.insert_scope(None, SymbolScope::Global);
 

	
 
        fn quick_type(variants: &[ParserTypeVariant]) -> ParserType {
 
            let mut t = ParserType{ elements: Vec::with_capacity(variants.len()) };
 
            for variant in variants {
 
                t.elements.push(ParserTypeElement{ full_span: InputSpan::new(), variant: variant.clone() });
 
            }
 
            t
 
        }
 

	
 
        use ParserTypeVariant as PTV;
 
        insert_builtin_function(&mut parser, "get", &["T"], |id| (
 
            vec![
 
                ("input", quick_type(&[PTV::Input, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::PolymorphicArgument(id.upcast(), 0)])
 
        ));
 
        insert_builtin_function(&mut parser, "put", &["T"], |id| (
 
            vec![
 
                ("output", quick_type(&[PTV::Output, PTV::PolymorphicArgument(id.upcast(), 0)])),
 
                ("value", quick_type(&[PTV::PolymorphicArgument(id.upcast(), 0)])),
 
            ],
 
            quick_type(&[PTV::Void])
 
        ));
 
        insert_builtin_function(&mut parser, "fires", &["T"], |id| (
 
            vec![
 
                ("port", quick_type(&[PTV::InputOrOutput, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::Bool])
 
        ));
 
        insert_builtin_function(&mut parser, "create", &["T"], |id| (
 
            vec![
 
                ("length", quick_type(&[PTV::IntegerLike]))
 
            ],
 
            quick_type(&[PTV::ArrayLike, PTV::PolymorphicArgument(id.upcast(), 0)])
 
        ));
 
        insert_builtin_function(&mut parser, "length", &["T"], |id| (
 
            vec![
 
                ("array", quick_type(&[PTV::ArrayLike, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::IntegerLike])
 
        ));
 
        insert_builtin_function(&mut parser, "assert", &[], |id| (
 
            vec![
 
                ("condition", quick_type(&[PTV::Bool])),
 
            ],
 
            quick_type(&[PTV::Void])
 
        ));
 

	
 
        parser
 
    }
 

	
 
    pub fn feed(&mut self, mut source: InputSource) -> Result<(), ParseError> {
 
        // TODO: @Optimize
 
        let mut token_buffer = TokenBuffer::new();
 
        self.pass_tokenizer.tokenize(&mut source, &mut token_buffer)?;
 

	
 
        let module = Module{
 
            source,
 
            tokens: token_buffer,
 
            root_id: RootId::new_invalid(),
 
            name: None,
 
            version: None,
 
            phase: ModuleCompilationPhase::Tokenized,
 
        };
 
        self.modules.push(module);
 

	
 
        Ok(())
 
    }
 

	
 
    pub fn parse(&mut self) -> Result<(), ParseError> {
 
        let mut pass_ctx = PassCtx{
 
            heap: &mut self.heap,
 
            symbols: &mut self.symbol_table,
 
            pool: &mut self.string_pool,
 
        };
 

	
 
        // Advance all modules to the phase where all symbols are scanned
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_symbols.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // With all symbols scanned, perform further compilation until we can
 
        // add all base types to the type table.
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_import.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
            self.pass_definitions.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // Add every known type to the type table
 
        self.type_table.build_base_types(&mut self.modules, &mut pass_ctx)?;
 

	
 
        // Continue compilation with the remaining phases now that the types
 
        // are all in the type table
 
        for module_idx in 0..self.modules.len() {
 
            // TODO: Remove the entire Visitor abstraction. It really doesn't
 
            //  make sense considering the amount of special handling we do
 
            //  in each pass.
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                module: &mut self.modules[module_idx],
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
            };
 
            self.pass_validation.visit_module(&mut ctx)?;
 
        }
 

	
 
        // Perform typechecking on all modules
 
        let mut queue = ResolveQueue::new();
 
        for module in &mut self.modules {
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                module,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
            };
 
            PassTyping::queue_module_definitions(&mut ctx, &mut queue);
 
        };
 
        while !queue.is_empty() {
 
            let top = queue.pop().unwrap();
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                module: &mut self.modules[top.root_id.index as usize],
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
            };
 
            self.pass_typing.handle_module_definition(&mut ctx, &mut queue, top)?;
 
        }
 

	
 
        // Perform remaining steps
 
        // TODO: Phase out at some point
 
        for module in &self.modules {
 
            let root_id = module.root_id;
 
            if let Err((position, message)) = Self::parse_inner(&mut self.heap, root_id) {
 
                return Err(ParseError::new_error_str_at_pos(&self.modules[0].source, position, &message))
 
            }
 
        }
 

	
 
        let mut writer = ASTWriter::new();
 
        let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        writer.write_ast(&mut file, &self.heap);
 
        // let mut writer = ASTWriter::new();
 
        // let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        // writer.write_ast(&mut file, &self.heap);
 

	
 
        Ok(())
 
    }
 

	
 
    pub fn parse_inner(h: &mut Heap, pd: RootId) -> VisitorResult {
 
        // TODO: @cleanup, slowly phasing out old compiler
 
        // NestedSynchronousStatements::new().visit_protocol_description(h, pd)?;
 
        // ChannelStatementOccurrences::new().visit_protocol_description(h, pd)?;
 
        // FunctionStatementReturns::new().visit_protocol_description(h, pd)?;
 
        // ComponentStatementReturnNew::new().visit_protocol_description(h, pd)?;
 
        // CheckBuiltinOccurrences::new().visit_protocol_description(h, pd)?;
 
        // BuildSymbolDeclarations::new().visit_protocol_description(h, pd)?;
 
        // LinkCallExpressions::new().visit_protocol_description(h, pd)?;
 
        // BuildScope::new().visit_protocol_description(h, pd)?;
 
        // ResolveVariables::new().visit_protocol_description(h, pd)?;
 
        LinkStatements::new().visit_protocol_description(h, pd)?;
 
        // BuildLabels::new().visit_protocol_description(h, pd)?;
 
        // ResolveLabels::new().visit_protocol_description(h, pd)?;
 
        // AssignableExpressions::new().visit_protocol_description(h, pd)?;
 
        // IndexableExpressions::new().visit_protocol_description(h, pd)?;
 
        // SelectableExpressions::new().visit_protocol_description(h, pd)?;
 

	
 
        Ok(())
 
    }
 
}
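// Pipeline summary for the passes driven above (descriptive note, not new code):
//   feed():  PassTokenizer                      -> Module { phase: Tokenized }
//   parse(): PassSymbols (per module)           -> symbols scanned
//            PassImport + PassDefinitions       -> full AST per module
//            TypeTable::build_base_types        -> base types known to the type table
//            PassValidationLinking (per module) -> AST validated and linked
//            PassTyping (via ResolveQueue)      -> procedure monomorphs typechecked
//            parse_inner / LinkStatements       -> remnants of the old compiler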
 

	
 
// Note: the argument and return types are produced by a closure because building them requires the function ID.
 
fn insert_builtin_function<T: Fn(FunctionDefinitionId) -> (Vec<(&'static str, ParserType)>, ParserType)> (
 
    p: &mut Parser, func_name: &str, polymorphic: &[&str], arg_and_return_fn: T) {
 

	
 
    let mut poly_vars = Vec::with_capacity(polymorphic.len());
 
    for poly_var in polymorphic {
 
        poly_vars.push(Identifier{ span: InputSpan::new(), value: p.string_pool.intern(poly_var.as_bytes()) });
 
    }
 

	
 
    let func_ident_ref = p.string_pool.intern(func_name.as_bytes());
 
    let func_id = p.heap.alloc_function_definition(|this| FunctionDefinition{
 
        this,
 
        defined_in: RootId::new_invalid(),
 
        builtin: true,
 
        span: InputSpan::new(),
 
        identifier: Identifier{ span: InputSpan::new(), value: func_ident_ref.clone() },
 
        poly_vars,
 
        return_types: Vec::new(),
 
        parameters: Vec::new(),
 
        body: BlockStatementId::new_invalid(),
 
        num_expressions_in_body: -1,
 
    });
 

	
 
    let (args, ret) = arg_and_return_fn(func_id);
 

	
 
    let mut parameters = Vec::with_capacity(args.len());
 
    for (arg_name, arg_type) in args {
 
        let identifier = Identifier{ span: InputSpan::new(), value: p.string_pool.intern(arg_name.as_bytes()) };
 
        let param_id = p.heap.alloc_variable(|this| Variable{
 
            this,
 
            kind: VariableKind::Parameter,
 
            parser_type: arg_type.clone(),
 
            identifier,
 
            relative_pos_in_block: 0,
 
            unique_id_in_scope: 0
 
        });
 
        parameters.push(param_id);
 
    }
 

	
 
    let func = &mut p.heap[func_id];
 
    func.parameters = parameters;
 
    func.return_types.push(ret);
 

	
 
    p.symbol_table.insert_symbol(SymbolScope::Global, Symbol{
 
        name: func_ident_ref,
 
        variant: SymbolVariant::Definition(SymbolDefinition{
 
            defined_in_module: RootId::new_invalid(),
 
            defined_in_scope: SymbolScope::Global,
 
            definition_span: InputSpan::new(),
 
            identifier_span: InputSpan::new(),
 
            imported_at: None,
 
            class: DefinitionClass::Function,
 
            definition_id: func_id.upcast(),
 
        })
 
    }).unwrap();
 
}
 
\ No newline at end of file
src/protocol/parser/pass_typing.rs
 
/// pass_typing
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure, inserting and determining its initial type based on context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter casee we return an
 
/// error.
 
///
 
/// Inference may be applied on non-polymorphic procedures and on polymorphic
 
/// procedures. When dealing with a non-polymorphic procedure we apply the type
 
/// resolver and annotate the AST with the `ConcreteType`s. When dealing with
 
/// polymorphic procedures we will only annotate the AST once, preserving
 
/// references to polymorphic variables. Any later pass will perform just the
 
/// type checking.
 
///
 
/// TODO: Needs a thorough rewrite:
 
///  0. polymorph_progress is intentionally broken at the moment.
 
///  1. For polymorphic type inference we need to have an extra datastructure
 
///     for progressing the polymorphic variables and mapping them back to each
 
///     signature type that uses that polymorphic type. The two types of markers
 
///     became somewhat of a mess.
 
///  2. We're doing a lot of extra work. It seems better to apply the initial
 
///     type based on expression parents, then to apply forced constraints (arg
 
///     to a fires() call must be port-like), only then to start progressing the
 
///     types.
 
///     Furthermore, queueing of expressions can be more intelligent; currently
 
///     every child/parent of an expression is inferred again when queued. Hence
 
///     we need to queue only specific children/parents of expressions.
 
///  3. Remove the `msg` type?
 
///  4. Disallow certain types in certain operations (e.g. `Void`).
 
///  5. Implement implicit and explicit casting.
 
///  6. Investigate different ways of performing the type-on-type inference,
 
///     maybe there is a better way than flattened trees + markers?
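///
/// As a rough illustration of the constraint-based progression described above
/// (not part of this changeset): if an addition `a + b` initially types both
/// operands as NumberLike, and `a` is later progressed to SInt32 through some
/// other constraint, then the equality constraint between the operands also
/// progresses `b` (and the expression's result type) to SInt32, and every
/// expression using `b` is queued to be re-progressed in turn.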
 

	
 
macro_rules! debug_log_enabled {
 
    () => { true };
 
    () => { false };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "types", $format);
 
        enabled_debug_print!(false, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "types", $format, $($args),*);
 
        enabled_debug_print!(false, "types", $format, $($args),*);
 
    };
 
}
 

	
 
use std::collections::{HashMap, HashSet};
 

	
 
use crate::collections::DequeSet;
 
use crate::protocol::ast::*;
 
use crate::protocol::input_source::ParseError;
 
use crate::protocol::parser::ModuleCompilationPhase;
 
use crate::protocol::parser::type_table::*;
 
use crate::protocol::parser::token_parsing::*;
 
use super::visitor::{
 
    STMT_BUFFER_INIT_CAPACITY,
 
    EXPR_BUFFER_INIT_CAPACITY,
 
    Ctx,
 
    Visitor2,
 
    VisitorResult
 
};
 

	
 
const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 
const STRING_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::String ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const SLICE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Slice, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // When we infer types of AST elements that support polymorphic arguments,
 
    // then we might have the case that multiple embedded types depend on the
 
    // polymorphic type (e.g. func bla(T a, T[] b) -> T[][]). If we can infer
 
    // the type in one place (e.g. argument a), then we may propagate this
 
    // information to other types (e.g. argument b and the return type). For
 
    // this reason we place markers in the `InferenceType` instances such that
 
    // we know which part of the type was originally a polymorphic argument.
 
    Marker(u32),
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    UInt8,
 
    UInt16,
 
    UInt32,
 
    UInt64,
 
    SInt8,
 
    SInt16,
 
    SInt32,
 
    SInt64,
 
    Character,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, u32)
 
}
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        match self {
 
            InferenceTypePart::Marker(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | 
 
            ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_msg_array_or_slice(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_msg_array_or_slice()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 |
 
            ITP::Character | ITP::String => {
 
                -1
 
            },
 
            ITP::Marker(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Instance(_, num_args) => {
 
                (*num_args as i32) - 1
 
            }
 
        }
 
    }
 
}
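// Worked example (descriptive note, not part of this changeset): an array-of-arrays
// of UInt8 is stored as the flattened, depth-first part list
//   [ Array, Array, UInt8 ]
// A traversal starts with a remaining depth of 1 and adds depth_change() per part:
// Array (+0) -> 1, Array (+0) -> 1, UInt8 (-1) -> 0, so the (sub)tree ends after UInt8.
// A polymorphic `T` written by the programmer additionally gets a Marker part in front
// of the parts it expands to, so progress made on one occurrence of `T` can be copied
// to the other signature types that mention it.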
 

	
 
impl From<ConcreteTypePart> for InferenceTypePart {
 
    fn from(v: ConcreteTypePart) -> InferenceTypePart {
 
        use ConcreteTypePart as CTP;
 
        use InferenceTypePart as ITP;
 

	
 
        match v {
 
            CTP::Void => ITP::Void,
 
            CTP::Message => ITP::Message,
 
            CTP::Bool => ITP::Bool,
 
            CTP::UInt8 => ITP::UInt8,
 
            CTP::UInt16 => ITP::UInt16,
 
            CTP::UInt32 => ITP::UInt32,
 
            CTP::UInt64 => ITP::UInt64,
 
            CTP::SInt8 => ITP::SInt8,
 
            CTP::SInt16 => ITP::SInt16,
 
            CTP::SInt32 => ITP::SInt32,
 
            CTP::SInt64 => ITP::SInt64,
 
            CTP::Character => ITP::Character,
 
            CTP::String => ITP::String,
 
            CTP::Array => ITP::Array,
 
            CTP::Slice => ITP::Slice,
 
            CTP::Input => ITP::Input,
 
            CTP::Output => ITP::Output,
 
            CTP::Instance(id, num) => ITP::Instance(id, num),
 
        }
 
    }
 
}
 
@@ -1298,385 +1298,385 @@ impl Visitor2 for PassTyping {
 
            Literal::Union(literal) => {
 
                // May carry subexpressions and polymorphic arguments
 
                // TODO: @performance
 
                let expr_ids = literal.values.clone();
 
                self.insert_initial_union_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            },
 
            Literal::Array(expressions) => {
 
                // TODO: @performance
 
                let expr_ids = expressions.clone();
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            }
 
        }
 

	
 
        self.progress_literal_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // By default we set the polymorph idx for calls to 0. If the call ends
 
        // up not being a polymorphic one, then we will select the default
 
        // expression types in the type table
 
        let call_expr = &ctx.heap[id];
 
        self.expr_types[call_expr.unique_id_in_definition as usize].field_or_monomorph_idx = 0;
 

	
 
        // Visit all arguments
 
        for arg_expr_id in call_expr.arguments.clone() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let var_data = self.var_types.get_mut(var_expr.declaration.as_ref().unwrap()).unwrap();
 
        var_data.used_at.push(upcast_id);
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
impl PassTyping {
 
    fn temp_get_display_name(&self, ctx: &Ctx, expr_id: ExpressionId) -> String {
 
        let expr_idx = ctx.heap[expr_id].get_unique_id_in_definition();
 
        let expr_type = &self.expr_types[expr_idx as usize].expr_type;
 
        expr_type.display_name(&ctx.heap)
 
    }
 

	
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> {
 
        // Keep inferring until we can no longer make any progress
 
        while !self.expr_queued.is_empty() {
 
            let next_expr_idx = self.expr_queued.pop_front().unwrap();
 
            self.progress_expr(ctx, next_expr_idx)?;
 
        }
 

	
 
        // Helper for transferring polymorphic variables to concrete types and
 
        // checking if they're completely specified
 
        fn poly_inference_to_concrete_type(
 
            ctx: &Ctx, expr_id: ExpressionId, inference: &Vec<InferenceType>
 
        ) -> Result<Vec<ConcreteType>, ParseError> {
 
            let mut concrete = Vec::with_capacity(inference.len());
 
            for (poly_idx, poly_type) in inference.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    let expr = &ctx.heap[expr_id];
 
                    let definition = match expr {
 
                        Expression::Call(expr) => expr.definition,
 
                        Expression::Literal(expr) => match &expr.value {
 
                            Literal::Enum(lit) => lit.definition,
 
                            Literal::Union(lit) => lit.definition,
 
                            Literal::Struct(lit) => lit.definition,
 
                            _ => unreachable!()
 
                        },
 
                        _ => unreachable!(),
 
                    };
 
                    let poly_vars = ctx.heap[definition].poly_vars();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module.source, expr.span(), format!(
 
                            "could not fully infer the type of polymorphic variable '{}' of this expression (got '{}')",
 
                            poly_vars[poly_idx].value.as_str(), poly_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 

	
 
                let mut concrete_type = ConcreteType::default();
 
                poly_type.write_concrete_type(&mut concrete_type);
 
                concrete.push(concrete_type);
 
            }
 

	
 
            Ok(concrete)
 
        }
 

	
 
        // Inference is now done. But we may still have uninferred types. So we
 
        // check for these.
 
        for infer_expr in self.expr_types.iter_mut() {
 
            let expr_type = &mut infer_expr.expr_type;
 
            if !expr_type.is_done {
 
                // Auto-infer numberlike/integerlike types to a regular int
 
                if expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    expr_type.parts[0] = InferenceTypePart::SInt32;
 
                } else {
 
                    let expr = &ctx.heap[infer_expr.expr_id];
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module.source, expr.span(), format!(
 
                            "could not fully infer the type of this expression (got '{}')",
 
                            expr_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 
            }
 

	
 
            // Expression is fine, check if any extra data is attached
 
            if infer_expr.extra_data_idx < 0 { continue; }
 

	
 
            // Extra data is attached, perform typechecking and transfer
 
            // resolved information to the expression
 
            let extra_data = &self.extra_data[infer_expr.extra_data_idx as usize];
 
            if extra_data.poly_vars.is_empty() { continue; }
 

	
 
            // Note that only call and literal expressions need full inference.
 
            // Select expressions also use `extra_data`, but only for temporary
 
            // storage of the struct type whose field it is selecting.
 
            match &ctx.heap[extra_data.expr_id] {
 
                Expression::Call(expr) => {
 
                    if expr.method != Method::UserFunction && expr.method != Method::UserComponent {
 
                        // Builtin function
 
                        continue;
 
                    }
 

	
 
                    let definition_id = expr.definition;
 
                    let poly_types = poly_inference_to_concrete_type(ctx, extra_data.expr_id, &extra_data.poly_vars)?;
 

	
 
                    match ctx.types.get_procedure_monomorph_index(&definition_id, &poly_types) {
 
                        Some(reserved_idx) => {
 
                            // Already typechecked, or already put into the resolve queue
 
                            infer_expr.field_or_monomorph_idx = reserved_idx;
 
                        },
 
                        None => {
 
                            // Not typechecked yet, so add an entry in the queue
 
                            let reserved_idx = ctx.types.reserve_procedure_monomorph_index(&definition_id, Some(poly_types.clone()));
 
                            infer_expr.field_or_monomorph_idx = reserved_idx;
 
                            queue.push(ResolveQueueElement{
 
                                root_id: ctx.heap[definition_id].defined_in(),
 
                                definition_id,
 
                                monomorph_types: poly_types,
 
                                reserved_monomorph_idx: reserved_idx,
 
                            });
 
                        }
 
                    }
 
                },
 
                Expression::Literal(expr) => {
 
                    let definition_id = match &expr.value {
 
                        Literal::Enum(lit) => lit.definition,
 
                        Literal::Union(lit) => lit.definition,
 
                        Literal::Struct(lit) => lit.definition,
 
                        _ => unreachable!(),
 
                    };
 

	
 
                    let poly_types = poly_inference_to_concrete_type(ctx, extra_data.expr_id, &extra_data.poly_vars)?;
 
                    let mono_index = ctx.types.add_data_monomorph(&definition_id, poly_types);
 
                    infer_expr.field_or_monomorph_idx = mono_index;
 
                },
 
                Expression::Select(_) => {
 
                    debug_assert!(infer_expr.field_or_monomorph_idx >= 0);
 
                },
 
                _ => {
 
                    unreachable!("handling extra data for expression {:?}", &ctx.heap[extra_data.expr_id]);
 
                }
 
            }
 
        }
 

	
 
        // Every expression checked, and new monomorphs are queued. Transfer the
 
        // expression information to the type table.
 
        let definition_id = match &self.definition_type {
 
            DefinitionType::Component(id) => id.upcast(),
 
            DefinitionType::Function(id) => id.upcast(),
 
        };
 

	
 
        let target = ctx.types.get_procedure_expression_data_mut(&definition_id, self.reserved_idx);
 
        debug_assert!(target.poly_args == self.poly_vars);
 
        debug_assert!(target.expr_data.is_empty());
 
        debug_assert!(target.expr_data.is_empty()); // makes sure we never queue something twice
 

	
 
        target.expr_data.reserve(self.expr_types.len());
 
        for infer_expr in self.expr_types.iter() {
 
            let mut concrete = ConcreteType::default();
 
            infer_expr.expr_type.write_concrete_type(&mut concrete);
 
            target.expr_data.push(MonomorphExpression{
 
                expr_type: concrete,
 
                field_or_monomorph_idx: infer_expr.field_or_monomorph_idx
 
            });
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, idx: i32) -> Result<(), ParseError> {
 
        let id = self.expr_types[idx as usize].expr_id; // TODO: @Temp
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Binding(_expr) => {
 
                unimplemented!("progress binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Literal(expr) => {
 
                let id = expr.this;
 
                self.progress_literal_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError> {
 
        use AssignmentOperator as AO;
 

	
 
        let upcast_id = id.upcast();
 

	
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.temp_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type: {}", self.temp_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type: {}", self.temp_get_display_name(ctx, upcast_id));
 

	
 
        // Assignment does not return anything (it operates like a statement)
 
        let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &VOID_TEMPLATE)?;
 

	
 
        // Apply forced constraint to LHS value
 
        let progress_forced = match expr.operation {
 
            AO::Set =>
 
                false,
 
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
 
                self.apply_forced_constraint(ctx, arg1_expr_id, &NUMBERLIKE_TEMPLATE)?,
 
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
 
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
 
                self.apply_forced_constraint(ctx, arg1_expr_id, &INTEGERLIKE_TEMPLATE)?,
 
        };
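        // (Illustrative, with hypothetical snippets: `x = y` adds no extra constraint
        // on `x`, `x += y` forces `x` to the number-like template, and `x <<= y`
        // forces it to the integer-like template.)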
 

	
 
        let (progress_arg1, progress_arg2) = self.apply_equal2_constraint(
 
            ctx, upcast_id, arg1_expr_id, 0, arg2_expr_id, 0
 
        )?;
 
        debug_assert!(if progress_forced { progress_arg2 } else { true });
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_forced || progress_arg1, self.temp_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.temp_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.temp_get_display_name(ctx, upcast_id));
 

	
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_forced || progress_arg1 { self.queue_expr(ctx, arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(ctx, arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError> {
 
        // Note: test expression type is already enforced
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.true_expression;
 
        let arg2_expr_id = expr.false_expression;
 

	
 
        debug_log!("Conditional expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.temp_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type: {}", self.temp_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type: {}", self.temp_get_display_name(ctx, upcast_id));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.temp_get_display_name(ctx, arg1_expr_id));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.temp_get_display_name(ctx, arg2_expr_id));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.temp_get_display_name(ctx, upcast_id));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(ctx, arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(ctx, arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> Result<(), ParseError> {
 
        // Note: our expression type might be fixed by our parent, but we still
 
        // need to make sure it matches the type associated with our operation.
 
        use BinaryOperator as BO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_id = expr.left;
 
        let arg2_id = expr.right;
 

	
 
        debug_log!("Binary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.temp_get_display_name(ctx, arg1_id));
 
        debug_log!("   - Arg2 type: {}", self.temp_get_display_name(ctx, arg2_id));
 
        debug_log!("   - Expr type: {}", self.temp_get_display_name(ctx, upcast_id));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = match expr.operation {
 
            BO::Concatenate => {
 
                // Arguments may be arrays/slices, output is always an array
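                // (e.g. a hypothetical `some_array @ some_slice` is accepted; both sides
                // must be array-like and share an element type, while the result is an array.)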
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &ARRAYLIKE_TEMPLATE)?;
 

	
 
                // If they're all arraylike, then we want the subtype to match
 
                let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 1)?;
 

	
 
                (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
            },
 
            BO::LogicalOr | BO::LogicalAnd => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => {
 
                // All equal of integer type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
            BO::Equality | BO::Inequality => {
 
                // Equal2 on args, forced boolean output
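                // (e.g. in a hypothetical `a == b` both operands must end up with the
                // same type, while the expression itself is always `bool`.)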
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
@@ -1894,385 +1894,388 @@ impl PassTyping {
 
                    let mut struct_def_id = None;
 

	
 
                    for (field_def_idx, field_def) in struct_def.fields.iter().enumerate() {
 
                        if field_def.identifier == select_expr.field_name {
 
                            // Set field definition and index
 
                            let infer_expr = &mut self.expr_types[expr_idx as usize];
 
                            infer_expr.field_or_monomorph_idx = field_def_idx as i32;
 
                            struct_def_id = Some(type_def.ast_definition);
 
                            break;
 
                        }
 
                    }
 

	
 
                    if struct_def_id.is_none() {
 
                        let ast_struct_def = ctx.heap[type_def.ast_definition].as_struct();
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module.source, select_expr.field_name.span, format!(
 
                                "this field does not exist on the struct '{}'",
 
                                ast_struct_def.identifier.value.as_str()
 
                            )
 
                        ))
 
                    }
 

	
 
                    // Encountered definition and field index for the
 
                    // first time
 
                    self.insert_initial_select_polymorph_data(ctx, id, struct_def_id.unwrap());
 
                },
 
                Ok(None) => {
 
                    // Type of subject is not yet known, so we
 
                    // cannot make any progress yet
 
                    return Ok(())
 
                },
 
                Err(()) => {
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module.source, select_expr.field_name.span, format!(
 
                            "Can only apply field access to structs, got a subject of type '{}'",
 
                            subject_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 
            }
 
        }
 

	
 
        // If here then field index is known, and the referenced struct type
 
        // information is inserted into `extra_data`. Check to see if we can
 
        // do some mutual inference.
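        // (Illustrative, for a hypothetical `struct Pair<T>{ T first, T second }`: once
        // the subject is known to be `Pair<u32>`, selecting `first` resolves T to `u32`
        // and thereby this expression's type; a known field type feeds back likewise.)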
 
        let poly_data = &mut self.extra_data[extra_idx as usize];
 
        let mut poly_progress = HashSet::new();
 

	
 
        // Apply to struct's type
 
        let signature_type: *mut _ = &mut poly_data.embedded[0];
 
        let subject_type: *mut _ = &mut self.expr_types[subject_expr_idx as usize].expr_type;
 

	
 
        let (_, progress_subject) = Self::apply_equal2_signature_constraint(
 
            ctx, upcast_id, Some(subject_id), poly_data, &mut poly_progress,
 
            signature_type, 0, subject_type, 0
 
        )?;
 

	
 
        if progress_subject {
 
            self.expr_queued.push_back(subject_expr_idx);
 
        }
 

	
 
        // Apply to field's type
 
        let signature_type: *mut _ = &mut poly_data.returned;
 
        let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 

	
 
        let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
            ctx, upcast_id, None, poly_data, &mut poly_progress,
 
            signature_type, 0, expr_type, 0
 
        )?;
 

	
 
        if progress_expr {
 
            if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                let parent_idx = ctx.heap[parent_id].get_unique_id_in_definition();
 
                self.expr_queued.push_back(parent_idx);
 
            }
 
        }
 

	
 
        // Reapply progress in polymorphic variables to struct's type
 
        let signature_type: *mut _ = &mut poly_data.embedded[0];
 
        let subject_type: *mut _ = &mut self.expr_types[subject_expr_idx as usize].expr_type;
 

	
 
        let progress_subject = Self::apply_equal2_polyvar_constraint(
 
            poly_data, &poly_progress, signature_type, subject_type
 
        );
 

	
 
        let signature_type: *mut _ = &mut poly_data.returned;
 
        let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 

	
 
        let progress_expr = Self::apply_equal2_polyvar_constraint(
 
            poly_data, &poly_progress, signature_type, expr_type
 
        );
 

	
 
        if progress_subject { self.queue_expr(ctx, subject_id); }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject, self.temp_get_display_name(ctx, subject_id));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.temp_get_display_name(ctx, upcast_id));
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let expr_idx = expr.unique_id_in_definition;
 
        let extra_idx = self.expr_types[expr_idx as usize].extra_data_idx;
 

	
 
        debug_log!("Literal expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.temp_get_display_name(ctx, upcast_id));
 

	
 
        let progress_expr = match &expr.value {
 
            Literal::Null => {
 
                self.apply_forced_constraint(ctx, upcast_id, &MESSAGE_TEMPLATE)?
 
            },
 
            Literal::Integer(_) => {
 
                self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?
 
            },
 
            Literal::True | Literal::False => {
 
                self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?
 
            },
 
            Literal::Character(_) => {
 
                self.apply_forced_constraint(ctx, upcast_id, &CHARACTER_TEMPLATE)?
 
            },
 
            Literal::String(_) => {
 
                self.apply_forced_constraint(ctx, upcast_id, &STRING_TEMPLATE)?
 
            },
 
            Literal::Struct(data) => {
 
                let extra = &mut self.extra_data[extra_idx as usize];
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                debug_assert_eq!(extra.embedded.len(), data.fields.len());
 

	
 
                debug_log!(" * During (inferring types from fields and struct type):");
 

	
 
                // Mutually infer field signature/expression types
 
                for (field_idx, field) in data.fields.iter().enumerate() {
 
                    let field_expr_id = field.value;
 
                    let field_expr_idx = ctx.heap[field_expr_id].get_unique_id_in_definition();
 
                    let signature_type: *mut _ = &mut extra.embedded[field_idx];
 
                    let field_type: *mut _ = &mut self.expr_types[field_expr_idx as usize].expr_type;
 
                    let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                        ctx, upcast_id, Some(field_expr_id), extra, &mut poly_progress,
 
                        signature_type, 0, field_type, 0
 
                    )?;
 

	
 
                    debug_log!(
 
                        "   - Field {} type | sig: {}, field: {}", field_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*field_type}.display_name(&ctx.heap)
 
                    );
 

	
 
                    if progress_arg {
 
                        self.expr_queued.push_back(field_expr_idx);
 
                    }
 
                }
 

	
 
                debug_log!("   - Field poly progress | {:?}", poly_progress);
 

	
 
                // Same for the type of the struct itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 
                debug_log!("   - Ret poly progress | {:?}", poly_progress);
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup, cannot call utility self.queue_parent thingo
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        let parent_idx = ctx.heap[parent_id].get_unique_id_in_definition();
 
                        self.expr_queued.push_back(parent_idx);
 
                    }
 
                }
 

	
 
                // Check which expressions use the polymorphic arguments. If the
 
                // polymorphic variables have been progressed then we try to 
 
                // progress them inside the expression as well.
 
                debug_log!(" * During (reinferring from progressed polyvars):");
 

	
 
                // For all field expressions
 
                for field_idx in 0..extra.embedded.len() {
 
                    debug_assert_eq!(field_idx, data.fields[field_idx].field_idx, "confusing, innit?");
 
                    // Note: fields in extra.embedded are in the same order as
 
                    // they are specified in the literal. Whereas
 
                    // `data.fields[...].field_idx` points to the field in the
 
                    // struct definition.
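                    // (e.g. for a hypothetical `Point{ y: 2, x: 1 }` the embedded order is
                    // [y, x], while `field_idx` maps those entries back onto the struct's
                    // declaration order [x, y].)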
 
                    let signature_type: *mut _ = &mut extra.embedded[field_idx];
 
                    let field_expr_id = data.fields[field_idx].value;
 
                    let field_expr_idx = ctx.heap[field_expr_id].get_unique_id_in_definition();
 
                    let field_type: *mut _ = &mut self.expr_types[field_expr_idx as usize].expr_type;
 

	
 
                    let progress_arg = Self::apply_equal2_polyvar_constraint(
 
                        extra, &poly_progress, signature_type, field_type
 
                    );
 

	
 
                    debug_log!(
 
                        "   - Field {} type | sig: {}, field: {}", field_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*field_type}.display_name(&ctx.heap)
 
                    );
 
                    if progress_arg {
 
                        self.expr_queued.push_back(field_expr_idx);
 
                    }
 
                }
 
                
 
                // For the return type
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 

	
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Enum(_) => {
 
                let extra = &mut self.extra_data[extra_idx as usize];
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                
 
                debug_log!(" * During (inferring types from return type)");
 

	
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        let parent_idx = ctx.heap[parent_id].get_unique_id_in_definition();
 
                        self.expr_queued.push_back(parent_idx);
 
                    }
 
                }
 

	
 
                debug_log!(" * During (reinferring from progress polyvars):");
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Union(data) => {
 
                let extra = &mut self.extra_data[extra_idx as usize];
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                debug_assert_eq!(extra.embedded.len(), data.values.len());
 

	
 
                debug_log!(" * During (inferring types from variant values and union type):");
 

	
 
                // Mutually infer union variant values
 
                for (value_idx, value_expr_id) in data.values.iter().enumerate() {
 
                    let value_expr_id = *value_expr_id;
 
                    let value_expr_idx = ctx.heap[value_expr_id].get_unique_id_in_definition();
 
                    let signature_type: *mut _ = &mut extra.embedded[value_idx];
 
                    let value_type: *mut _ = &mut self.expr_types[value_expr_idx as usize].expr_type;
 
                    let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                        ctx, upcast_id, Some(value_expr_id), extra, &mut poly_progress,
 
                        signature_type, 0, value_type, 0 
 
                    )?;
 

	
 
                    debug_log!(
 
                        "   - Value {} type | sig: {}, field: {}", value_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*value_type}.display_name(&ctx.heap)
 
                    );
 

	
 
                    if progress_arg {
 
                        self.expr_queued.push_back(value_expr_idx);
 
                    }
 
                }
 

	
 
                debug_log!("   - Field poly progress | {:?}", poly_progress);
 

	
 
                // Infer type of union itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 
                debug_log!("   - Ret poly progress | {:?}", poly_progress);
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup, borrowing rules
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        let parent_idx = ctx.heap[parent_id].get_unique_id_in_definition();
 
                        self.expr_queued.push_back(parent_idx);
 
                    }
 
                }
 

	
 
                debug_log!(" * During (reinferring from progress polyvars):");
 
            
 
                // For all embedded values of the union variant
 
                for value_idx in 0..extra.embedded.len() {
 
                    let signature_type: *mut _ = &mut extra.embedded[value_idx];
 
                    let value_expr_id = data.values[value_idx];
 
                    let value_expr_idx = ctx.heap[value_expr_id].get_unique_id_in_definition();
 
                    let value_type: *mut _ = &mut self.expr_types[value_expr_idx as usize].expr_type;
 
                    
 
                    let progress_arg = Self::apply_equal2_polyvar_constraint(
 
                        extra, &poly_progress, signature_type, value_type
 
                    );
 

	
 
                    debug_log!(
 
                        "   - Value {} type | sig: {}, value: {}", value_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*value_type}.display_name(&ctx.heap)
 
                    );
 
                    if progress_arg {
 
                        self.expr_queued.push_back(value_expr_idx);
 
                    }
 
                }
 

	
 
                // And for the union type itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;
 

	
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Array(data) => {
 
                let expr_elements = data.clone(); // TODO: @performance
 
                debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
 
                debug_log!(" * Before:");
 
                debug_log!("   - Expr type: {}", self.temp_get_display_name(ctx, upcast_id));
 

	
 
                // All elements should have an equal type
 
                let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
 
                for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
 
                    if *progress_arg {
 
                        self.queue_expr(ctx, *arg_id);
 
                    }
 
                }
 

	
 
                // And the output should be an array of the element types
 
                let mut progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                if !expr_elements.is_empty() {
 
                    let first_arg_id = expr_elements[0];
 
                    let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
 
                        ctx, upcast_id, upcast_id, 1, first_arg_id, 0
 
                    )?;
 

	
 
                    progress_expr = progress_expr || inner_expr_progress;
 

	
 
                    // Note that if the array type progressed the type of the arguments,
 
                    // then we should enqueue this progression function again
 
                    // TODO: @fix Make apply_equal_n accept a start idx as well
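                    // (Illustrative: if the element type is forced elsewhere, say to `u64`,
                    // and that progresses the first element here, requeueing lets
                    // apply_equal_n push the progress onto the remaining elements as well.)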
 
                    if arg_progress { self.queue_expr(ctx, upcast_id); }
 
                }
 

	
 
                debug_log!(" * After:");
 
                debug_log!("   - Expr type [{}]: {}", progress_expr, self.temp_get_display_name(ctx, upcast_id));
 

	
 
                progress_expr
 
            },
 
        };
src/protocol/tests/eval_silly.rs
 
use super::*;
 

	
 
#[test]
 
fn test_concatenate_operator() {
 
    Tester::new_single_source_expect_ok(
 
        "concatenate and check values",
 
        "
 
        // To see if we accept the polymorphic arg
 
        func check_pair<T>(T[] arr, u32 idx) -> bool {
 
            return arr[idx] == arr[idx + 1];
 
        }
 

	
 
        // To see if we can check fields of polymorphs
 
        func check_values<T>(T[] arr, u32 idx, u32 x, u32 y) -> bool {
 
            auto value = arr[idx];
 
            return value.x == x && value.y == y;
 
        }
 

	
 
        struct Point2D {
 
            u32 x,
 
            u32 y,
 
        }
 

	
 
        // Could do this inline, but we're attempting to stress the system a bit
 
        func create_point(u32 x, u32 y) -> Point2D {
 
            return Point2D{ x: x, y: y };
 
        }
 

	
 
        // Again, more stressing: returning a heap-allocated thing
 
        func create_array() -> Point2D[] {
 
            return {
 
                create_point(1, 2),
 
                create_point(1, 2),
 
                create_point(3, 4),
 
                create_point(3, 4)
 
            };
 
        }
 

	
 
        // The silly checkamajig
 
        func foo() -> bool {
 
            auto lhs = create_array();
 
            auto rhs = create_array();
 
            auto total = lhs @ rhs;
 
            auto is_equal =
 
                check_pair(total, 0) &&
 
                check_pair(total, 2) &&
 
                check_pair(total, 4) &&
 
                check_pair(total, 6);
 
            auto is_not_equal =
 
                !check_pair(total, 0) ||
 
                !check_pair(total, 2) ||
 
                !check_pair(total, 4) ||
 
                !check_pair(total, 6);
 
            auto has_correct_fields =
 
                check_values(total, 3, 3, 4) &&
 
                check_values(total, 4, 1, 2);
 
            auto array_check = lhs == rhs && total == total;
 
            return is_equal && !is_not_equal && has_correct_fields && array_check;
 
        }
 
        "
 
    ).for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_slicing_magic() {
 
    Tester::new_single_source_expect_ok("slicing", "
 
        struct Holder<T> {
 
            T[] left,
 
            T[] right,
 
        }
 

	
 
        func create_array<T>(T first_index, T last_index) -> T[] {
 
            auto result = {};
 
            while (first_index < last_index) {
 
                // Absolutely ridiculous, but we don't have builtin array functions yet...
 
                result = result @ { first_index };
 
                first_index += 1;
 
            }
 
            return result;
 
        }
 

	
 
        func create_holder<T>(T left_first, T left_last, T right_first, T right_last) -> Holder<T> {
 
            return Holder{
 
                left: create_array(left_first, left_last),
 
                right: create_array(right_first, right_last)
 
            };
 
        }
 

	
 
        // Another silly thing: we first slice the full array, then subslice a single
 
        // element, then concatenate. We always return an array of two things.
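        // (Walkthrough with the calls below: left = [0,1,2,3,4], so left[3..5] = [3,4]
        //  and its [0..1] is [3]; right = [2,...,7], so right[1..3] = [3,4] and its
        //  [0..1] is [3]; concatenated we return [3, 3].)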
 
        func slicing_magic<T>(Holder<T> holder, u32 left_base, u32 left_amount, u32 right_base, u32 right_amount) -> T[] {
 
            auto left = holder.left[left_base..left_base + left_amount];
 
            auto right = holder.right[right_base..right_base + right_amount];
 
            return left[0..1] @ right[0..1];
 
        }
 

	
 
        func foo() -> u32 {
 
            // left array will be [0, 1, 2, ...] and right array will be [2, 3, 4, ...]
 
            auto created = create_holder(0, 5, 2, 8);
 

	
 
            // in a convoluted fashion select the value 3 from the lhs and the value 3 from the rhs
 
            auto result = slicing_magic(created, 3, 2, 1, 2);
 

	
 
            // and return 3 + 3
 
            return result[0] + result[1];
 
        func foo() -> bool {
 
            // Preliminaries:
 
            // 1. construct a holder, where:
 
            //      - left array will be [0, 1, 2, ...]
 
            //      - right array will be [2, 3, 4, ...]
 
            // 2. Perform slicing magic, always returning an array [3, 3]
 
            // 3. Make sure result is 6
 

	
 
            // But of course, because we want to be silly, we will check this for
 
            // any possible integer type.
 
            auto created_u08 = create_holder<u8> (0, 5, 2, 8);
 
            auto created_u16 = create_holder<u16>(0, 5, 2, 8);
 
            auto created_u32 = create_holder<u32>(0, 5, 2, 8);
 
            auto created_u64 = create_holder<u64>(0, 5, 2, 8);
 

	
 
            auto result_u08 = slicing_magic(created_u08, 3, 2, 1, 2);
 
            auto result_u16 = slicing_magic(created_u16, 3, 2, 1, 2);
 
            auto result_u32 = slicing_magic(created_u32, 3, 2, 1, 2);
 
            auto result_u64 = slicing_magic(created_u64, 3, 2, 1, 2);
 

	
 
            auto result_s08 = slicing_magic(create_holder<s8> (0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s16 = slicing_magic(create_holder<s16>(0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s32 = slicing_magic(create_holder<s32>(0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s64 = slicing_magic(create_holder<s64>(0, 5, 2, 8), 3, 2, 1, 2);
 

	
 
            return
 
                result_u08[0] + result_u08[1] == 6 &&
 
                result_u16[0] + result_u16[1] == 6 &&
 
                result_u32[0] + result_u32[1] == 6 &&
 
                result_u64[0] + result_u64[1] == 6 &&
 
                result_s08[0] + result_s08[1] == 6 &&
 
                result_s16[0] + result_s16[1] == 6 &&
 
                result_s32[0] + result_s32[1] == 6 &&
 
                result_s64[0] + result_s64[1] == 6;
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_ok(Some(Value::UInt32(6)));
 
        f.call_ok(Some(Value::Bool(true)));
 
    }).for_struct("Holder", |s| { s
 
        .assert_num_monomorphs(8)
 
        .assert_has_monomorph("u8")
 
        .assert_has_monomorph("u16")
 
        .assert_has_monomorph("u32")
 
        .assert_has_monomorph("u64")
 
        .assert_has_monomorph("s8")
 
        .assert_has_monomorph("s16")
 
        .assert_has_monomorph("s32")
 
        .assert_has_monomorph("s64");
 
    });
 
}
 

	
 
#[test]
 
fn test_struct_fields() {
 
    Tester::new_single_source_expect_ok("struct field access", "
 
        struct Nester<T> {
 
            T v,
 
        }
 

	
 
        func make<T>(T inner) -> Nester<T> {
 
            return Nester{ v: inner };
 
        }
 

	
 
        func modify<T>(Nester<T> outer, T inner) -> Nester<T> {
 
            outer.v = inner;
 
            return outer;
 
        }
 

	
 
        func foo() -> bool {
 
            // Single depth modification
 
            auto original1 = make<u32>(5);
 
            auto modified1 = modify(original1, 2);
 
            auto success1 = original1.v == 5 && modified1.v == 2;
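            // (modify received its own copy, so original1 is expected to keep v == 5)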
 

	
 
            // Multiple levels of modification
 
            auto original2 = make(make(make(make(true))));
 
            auto modified2 = modify(original2.v, make(make(false))); // strip one Nester level
 
            auto success2 = original2.v.v.v.v == true && modified2.v.v.v == false;
 

	
 
            return success1 && success2;
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_field_selection_polymorphism() {
 
    // Bit silly, but just to be sure
 
    Tester::new_single_source_expect_ok("struct field shuffles", "
 
struct VecXYZ<T> { T x, T y, T z }
 
struct VecYZX<T> { T y, T z, T x }
 
struct VecZXY<T> { T z, T x, T y }
 
func modify_x<T>(T input) -> T {
 
    input.x = 1337;
 
    return input;
 
}
 

	
 
func foo() -> bool {
 
    auto xyz = VecXYZ<u16>{ x: 1, y: 2, z: 3 };
 
    auto yzx = VecYZX<u32>{ y: 2, z: 3, x: 1 };
 
    auto zxy = VecZXY<u64>{ x: 1, y: 2, z: 3 };
 

	
 
    auto mod_xyz = modify_x(xyz);
 
    auto mod_yzx = modify_x(yzx);
 
    auto mod_zxy = modify_x(zxy);
 

	
 
    return
 
        xyz.x == 1 && xyz.y == 2 && xyz.z == 3 &&
 
        yzx.x == 1 && yzx.y == 2 && yzx.z == 3 &&
 
        zxy.x == 1 && zxy.y == 2 && zxy.z == 3 &&
 
        mod_xyz.x == 1337 && mod_xyz.y == 2 && mod_xyz.z == 3 &&
 
        mod_yzx.x == 1337 && mod_yzx.y == 2 && mod_yzx.z == 3 &&
 
        mod_zxy.x == 1337 && mod_zxy.y == 2 && mod_zxy.z == 3;
 
}
 
").for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_index_error() {
 
    Tester::new_single_source_expect_ok("indexing error", "
 
        func check_array(u32[] vals, u32 idx) -> u32 {
 
            return vals[idx];
 
        }
 

	
 
        func foo() -> u32 {
 
            auto array = {1, 2, 3, 4, 5, 6, 7};
 
            check_array(array, 7);
 
            return array[0];
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_err("index 7 is out of bounds: array length is 7");
 
    });
 
}
 
\ No newline at end of file
src/protocol/tests/utils.rs
 
@@ -717,385 +717,385 @@ pub(crate) struct ExpressionTester<'a> {
 

	
 
impl<'a> ExpressionTester<'a> {
 
    fn new(
 
        ctx: TestCtx<'a>, definition_id: DefinitionId, expr: &'a Expression
 
    ) -> Self {
 
        Self{ ctx, definition_id, expr }
 
    }
 

	
 
    pub(crate) fn assert_concrete_type(self, expected: &str) -> Self {
 
        // Lookup concrete type
 
        let mono_data = self.ctx.types.get_procedure_expression_data(&self.definition_id, 0);
 
        let expr_index = self.expr.get_unique_id_in_definition();
 
        let concrete_type = &mono_data.expr_data[expr_index as usize].expr_type;
 

	
 
        // Serialize and check type
 
        let mut serialized = String::new();
 
        serialize_concrete_type(&mut serialized, self.ctx.heap, self.definition_id, concrete_type);
 

	
 
        assert_eq!(
 
            expected, &serialized,
 
            "[{}] Expected concrete type '{}', but got '{}' for {}",
 
            self.ctx.test_name, expected, &serialized, self.assert_postfix()
 
        );
 
        self
 
    }
 

	
 
    fn assert_postfix(&self) -> String {
 
        format!(
 
            "Expression{{ debug: {:?} }}",
 
            self.expr
 
        )
 
    }
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Interface for failed compilation
 
//------------------------------------------------------------------------------
 

	
 
pub(crate) struct AstErrTester {
 
    test_name: String,
 
    error: ParseError,
 
}
 

	
 
impl AstErrTester {
 
    fn new(test_name: String, error: ParseError) -> Self {
 
        Self{ test_name, error }
 
    }
 

	
 
    pub(crate) fn error<F: Fn(ErrorTester)>(&self, f: F) {
 
        // Maybe multiple errors will be supported in the future
 
        let tester = ErrorTester{ test_name: &self.test_name, error: &self.error };
 
        f(tester)
 
    }
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Utilities for failed compilation
 
//------------------------------------------------------------------------------
 

	
 
pub(crate) struct ErrorTester<'a> {
 
    test_name: &'a str,
 
    error: &'a ParseError,
 
}
 

	
 
impl<'a> ErrorTester<'a> {
 
    pub(crate) fn assert_num(self, num: usize) -> Self {
 
        assert_eq!(
 
            num, self.error.statements.len(),
 
            "[{}] expected error to consist of '{}' parts, but encountered '{}' for {}",
 
            self.test_name, num, self.error.statements.len(), self.assert_postfix()
 
        );
 

	
 
        self
 
    }
 

	
 
    pub(crate) fn assert_ctx_has(self, idx: usize, msg: &str) -> Self {
 
        assert!(
 
            self.error.statements[idx].context.contains(msg),
 
            "[{}] expected error statement {}'s context to contain '{}' for {}",
 
            self.test_name, idx, msg, self.assert_postfix()
 
        );
 

	
 
        self
 
    }
 

	
 
    pub(crate) fn assert_msg_has(self, idx: usize, msg: &str) -> Self {
 
        assert!(
 
            self.error.statements[idx].message.contains(msg),
 
            "[{}] expected error statement {}'s message to contain '{}' for {}",
 
            self.test_name, idx, msg, self.assert_postfix()
 
        );
 

	
 
        self
 
    }
 

	
 
    // TODO: @tokenizer This should really be removed, as compilation should be
 
    //  deterministic, but we're currently using rather inefficient hashsets for
 
    //  the type inference, so remove once compiler architecture has changed.
 
    pub(crate) fn assert_any_msg_has(self, msg: &str) -> Self {
 
        let mut is_present = false;
 
        for statement in &self.error.statements {
 
            if statement.message.contains(msg) {
 
                is_present = true;
 
                break;
 
            }
 
        }
 

	
 
        assert!(
 
            is_present, "[{}] Expected an error statement to contain '{}' for {}",
 
            self.test_name, msg, self.assert_postfix()
 
        );
 
        
 
        self
 
    }
 

	
 
    /// Seeks the index of the pattern in the context message, then checks if
 
    /// the input position corresponds to that index.
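    /// (Hypothetical usage: `.assert_occurs_at(0, "foo(")` asserts that statement 0's
    /// reported column points at the first occurrence of `foo(` in its context.)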
 
    pub (crate) fn assert_occurs_at(self, idx: usize, pattern: &str) -> Self {
 
        let pos = self.error.statements[idx].context.find(pattern);
 
        assert!(
 
            pos.is_some(),
 
            "[{}] incorrect occurs_at: '{}' could not be found in the context for {}",
 
            self.test_name, pattern, self.assert_postfix()
 
        );
 
        let pos = pos.unwrap();
 
        let col = self.error.statements[idx].start_column as usize;
 
        assert_eq!(
 
            pos + 1, col,
 
            "[{}] Expected error to occur at column {}, but found it at {} for {}",
 
            self.test_name, pos + 1, col, self.assert_postfix()
 
        );
 

	
 
        self
 
    }
 

	
 
    fn assert_postfix(&self) -> String {
 
        let mut v = String::new();
 
        v.push_str("error: [");
 
        for (idx, stmt) in self.error.statements.iter().enumerate() {
 
            if idx != 0 {
 
                v.push_str(", ");
 
            }
 

	
 
            v.push_str(&format!("{{ context: {}, message: {} }}", &stmt.context, stmt.message));
 
        }
 
        v.push(']');
 
        v
 
    }
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Generic utilities
 
//------------------------------------------------------------------------------
 

	
 
fn has_equal_num_monomorphs(ctx: TestCtx, num: usize, definition_id: DefinitionId) -> (bool, usize) {
 
    use DefinedTypeVariant::*;
 

	
 
    let type_def = ctx.types.get_base_definition(&definition_id).unwrap();
 
    let num_on_type = match &type_def.definition {
 
        Struct(v) => v.monomorphs.len(),
 
        Enum(v) => v.monomorphs.len(),
 
        Union(v) => v.monomorphs.len(),
 
        Function(v) => v.monomorphs.len(),
 
        Component(v) => v.monomorphs.len(),
 
    };
 

	
 
    (num_on_type == num, num_on_type)
 
}
 

	
 
fn has_monomorph(ctx: TestCtx, definition_id: DefinitionId, serialized_monomorph: &str) -> (bool, String) {
 
    use DefinedTypeVariant::*;
 

	
 
    let type_def = ctx.types.get_base_definition(&definition_id).unwrap();
 

	
 
    // Note: full_buffer is just for error reporting
 
    let mut full_buffer = String::new();
 
    let mut has_match = false;
 

	
 
    let serialize_monomorph = |monomorph: &Vec<ConcreteType>| -> String {
 
        let mut buffer = String::new();
 
        for (element_idx, element) in monomorph.iter().enumerate() {
 
            if element_idx != 0 {
 
                buffer.push(';');
 
            }
 
            serialize_concrete_type(&mut buffer, ctx.heap, definition_id, element);
 
        }
 

	
 
        buffer
 
    };
 

	
 
    full_buffer.push('[');
 
    let mut append_to_full_buffer = |buffer: String| {
 
        if buffer.len() == 1 {
 
        if full_buffer.len() != 1 {
 
            full_buffer.push_str(", ");
 
        }
 
        full_buffer.push('"');
 
        full_buffer.push_str(&buffer);
 
        full_buffer.push('"');
 
    };
 

	
 
    match &type_def.definition {
 
        Enum(_) | Union(_) | Struct(_) => {
 
            let monomorphs = type_def.definition.data_monomorphs();
 
            for monomorph in monomorphs.iter() {
 
                let buffer = serialize_monomorph(&monomorph.poly_args);
 
                if buffer == serialized_monomorph {
 
                    has_match = true;
 
                }
 
                append_to_full_buffer(buffer);
 
            }
 
        },
 
        Function(_) | Component(_) => {
 
            let monomorphs = type_def.definition.procedure_monomorphs();
 
            for monomorph in monomorphs.iter() {
 
                let buffer = serialize_monomorph(&monomorph.poly_args);
 
                if buffer == serialized_monomorph {
 
                    has_match = true;
 
                }
 
                append_to_full_buffer(buffer);
 
            }
 
        }
 
    }
 

	
 
    full_buffer.push(']');
 

	
 
    (has_match, full_buffer)
 
}
 

	
 
fn serialize_parser_type(buffer: &mut String, heap: &Heap, parser_type: &ParserType) {
 
    use ParserTypeVariant as PTV;
 

	
 
    fn write_bytes(buffer: &mut String, bytes: &[u8]) {
 
        let utf8 = String::from_utf8_lossy(bytes);
 
        buffer.push_str(&utf8);
 
    }
 

	
 
    fn serialize_variant(buffer: &mut String, heap: &Heap, parser_type: &ParserType, mut idx: usize) -> usize {
 
        match &parser_type.elements[idx].variant {
 
            PTV::Void => buffer.push_str("void"),
 
            PTV::InputOrOutput => {
 
                buffer.push_str("portlike<");
 
                idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                buffer.push('>');
 
            },
 
            PTV::ArrayLike => {
 
                idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                buffer.push_str("[???]");
 
            },
 
            PTV::IntegerLike => buffer.push_str("integerlike"),
 
            PTV::Message => buffer.push_str(KW_TYPE_MESSAGE_STR),
 
            PTV::Bool => buffer.push_str(KW_TYPE_BOOL_STR),
 
            PTV::UInt8 => buffer.push_str(KW_TYPE_UINT8_STR),
 
            PTV::UInt16 => buffer.push_str(KW_TYPE_UINT16_STR),
 
            PTV::UInt32 => buffer.push_str(KW_TYPE_UINT32_STR),
 
            PTV::UInt64 => buffer.push_str(KW_TYPE_UINT64_STR),
 
            PTV::SInt8 => buffer.push_str(KW_TYPE_SINT8_STR),
 
            PTV::SInt16 => buffer.push_str(KW_TYPE_SINT16_STR),
 
            PTV::SInt32 => buffer.push_str(KW_TYPE_SINT32_STR),
 
            PTV::SInt64 => buffer.push_str(KW_TYPE_SINT64_STR),
 
            PTV::Character => buffer.push_str(KW_TYPE_CHAR_STR),
 
            PTV::String => buffer.push_str(KW_TYPE_STRING_STR),
 
            PTV::IntegerLiteral => buffer.push_str("int_literal"),
 
            PTV::Inferred => buffer.push_str(KW_TYPE_INFERRED_STR),
 
            PTV::Array => {
 
                idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            PTV::Input => {
 
                buffer.push_str(KW_TYPE_IN_PORT_STR);
 
                buffer.push('<');
 
                idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                buffer.push('>');
 
            },
 
            PTV::Output => {
 
                buffer.push_str(KW_TYPE_OUT_PORT_STR);
 
                buffer.push('<');
 
                idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                buffer.push('>');
 
            },
 
            PTV::PolymorphicArgument(definition_id, poly_idx) => {
 
                let definition = &heap[*definition_id];
 
                let poly_arg = &definition.poly_vars()[*poly_idx as usize];
 
                buffer.push_str(poly_arg.value.as_str());
 
            },
 
            PTV::Definition(definition_id, num_embedded) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(definition.identifier().value.as_str());
 

	
 
                let num_embedded = *num_embedded;
 
                if num_embedded != 0 {
 
                    buffer.push('<');
 
                    for embedded_idx in 0..num_embedded {
 
                        if embedded_idx != 0 {
 
                            buffer.push(',');
 
                        }
 
                        idx = serialize_variant(buffer, heap, parser_type, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            }
 
        }
 

	
 
        idx
 
    }
 

	
 
    serialize_variant(buffer, heap, parser_type, 0);
 
}
 

	
 
fn serialize_concrete_type(buffer: &mut String, heap: &Heap, def: DefinitionId, concrete: &ConcreteType) {
 
    // Retrieve polymorphic variables
 
    let poly_vars = heap[def].poly_vars();
 

	
 
    fn write_bytes(buffer: &mut String, bytes: &[u8]) {
 
        let utf8 = String::from_utf8_lossy(bytes);
 
        buffer.push_str(&utf8);
 
    }
 

	
 
    fn serialize_recursive(
 
        buffer: &mut String, heap: &Heap, poly_vars: &Vec<Identifier>, concrete: &ConcreteType, mut idx: usize
 
    ) -> usize {
 
        use ConcreteTypePart as CTP;
 

	
 
        let part = &concrete.parts[idx];
 
        match part {
 
            CTP::Void => buffer.push_str("void"),
 
            CTP::Message => write_bytes(buffer, KW_TYPE_MESSAGE),
 
            CTP::Bool => write_bytes(buffer, KW_TYPE_BOOL),
 
            CTP::UInt8 => write_bytes(buffer, KW_TYPE_UINT8),
 
            CTP::UInt16 => write_bytes(buffer, KW_TYPE_UINT16),
 
            CTP::UInt32 => write_bytes(buffer, KW_TYPE_UINT32),
 
            CTP::UInt64 => write_bytes(buffer, KW_TYPE_UINT64),
 
            CTP::SInt8 => write_bytes(buffer, KW_TYPE_SINT8),
 
            CTP::SInt16 => write_bytes(buffer, KW_TYPE_SINT16),
 
            CTP::SInt32 => write_bytes(buffer, KW_TYPE_SINT32),
 
            CTP::SInt64 => write_bytes(buffer, KW_TYPE_SINT64),
 
            CTP::Character => write_bytes(buffer, KW_TYPE_CHAR),
 
            CTP::String => write_bytes(buffer, KW_TYPE_STRING),
 
            CTP::Array => {
 
                idx = serialize_recursive(buffer, heap, poly_vars, concrete, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            CTP::Slice => {
 
                idx = serialize_recursive(buffer, heap, poly_vars, concrete, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            CTP::Input => {
 
                write_bytes(buffer, KW_TYPE_IN_PORT);
 
                buffer.push('<');
 
                idx = serialize_recursive(buffer, heap, poly_vars, concrete, idx + 1);
 
                buffer.push('>');
 
            },
 
            CTP::Output => {
 
                write_bytes(buffer, KW_TYPE_OUT_PORT);
 
                buffer.push('<');
 
                idx = serialize_recursive(buffer, heap, poly_vars, concrete, idx + 1);
 
                buffer.push('>');
 
            },
 
            CTP::Instance(definition_id, num_sub) => {
 
                let definition_name = heap[*definition_id].identifier();
 
                buffer.push_str(definition_name.value.as_str());
 
                if *num_sub != 0 {
 
                    buffer.push('<');
 
                    for sub_idx in 0..*num_sub {
 
                        if sub_idx != 0 { buffer.push(','); }
 
                        idx = serialize_recursive(buffer, heap, poly_vars, concrete, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            }
 
        }
 

	
 
        idx
 
    }
 

	
 
    serialize_recursive(buffer, heap, poly_vars, concrete, 0);
 
}
 

	
 
fn seek_def_in_modules<'a>(heap: &Heap, modules: &'a [Module], def_id: DefinitionId) -> Option<&'a Module> {
 
    for module in modules {
 
        let root = &heap.protocol_descriptions[module.root_id];
 
        for definition in &root.definitions {
 
            if *definition == def_id {
 
                return Some(module)
 
            }
 
        }