Changeset - f34349cabe17
MH - 2022-02-25 11:54:59
contact@maxhenger.nl
WIP: Fix bug related to builtin procedure typechecking
3 files changed with 28 insertions and 17 deletions:
src/protocol/parser/mod.rs
 
@@ -112,320 +112,320 @@ pub struct PassCtx<'a> {
 
}
 

	
 
pub struct Parser {
 
    // Storage of all information created/gathered during compilation.
 
    pub(crate) heap: Heap,
 
    pub(crate) string_pool: StringPool, // Do not deallocate, holds all strings
 
    pub(crate) modules: Vec<Module>,
 
    pub(crate) symbol_table: SymbolTable,
 
    pub(crate) type_table: TypeTable,
 
    // Compiler passes, used as little state machines that keep their memory
 
    // around.
 
    pass_tokenizer: PassTokenizer,
 
    pass_symbols: PassSymbols,
 
    pass_import: PassImport,
 
    pass_definitions: PassDefinitions,
 
    pass_validation: PassValidationLinking,
 
    pass_typing: PassTyping,
 
    pass_rewriting: PassRewriting,
 
    pass_stack_size: PassStackSize,
 
    // Compiler options
 
    pub write_ast_to: Option<String>,
 
    pub(crate) arch: TargetArch,
 
}
 

	
 
impl Parser {
 
    pub fn new() -> Self {
 
        let mut parser = Parser{
 
            heap: Heap::new(),
 
            string_pool: StringPool::new(),
 
            modules: Vec::new(),
 
            symbol_table: SymbolTable::new(),
 
            type_table: TypeTable::new(),
 
            pass_tokenizer: PassTokenizer::new(),
 
            pass_symbols: PassSymbols::new(),
 
            pass_import: PassImport::new(),
 
            pass_definitions: PassDefinitions::new(),
 
            pass_validation: PassValidationLinking::new(),
 
            pass_typing: PassTyping::new(),
 
            pass_rewriting: PassRewriting::new(),
 
            pass_stack_size: PassStackSize::new(),
 
            write_ast_to: None,
 
            arch: TargetArch::new(),
 
        };
 

	
 
        parser.symbol_table.insert_scope(None, SymbolScope::Global);
 

	
 
        // Insert builtin types
 
        // TODO: At some point use correct values for size/alignment
 
        parser.arch.void_type_id    = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Void], false, 0, 1);
 
        parser.arch.message_type_id = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Message], false, 24, 8);
 
        parser.arch.bool_type_id    = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Bool], false, 1, 1);
 
        parser.arch.uint8_type_id   = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::UInt8], false, 1, 1);
 
        parser.arch.uint16_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::UInt16], false, 2, 2);
 
        parser.arch.uint32_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::UInt32], false, 4, 4);
 
        parser.arch.uint64_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::UInt64], false, 8, 8);
 
        parser.arch.sint8_type_id   = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::SInt8], false, 1, 1);
 
        parser.arch.sint16_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::SInt16], false, 2, 2);
 
        parser.arch.sint32_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::SInt32], false, 4, 4);
 
        parser.arch.sint64_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::SInt64], false, 8, 8);
 
        parser.arch.char_type_id    = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Character], false, 4, 4);
 
        parser.arch.string_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::String], false, 24, 8);
 
        parser.arch.array_type_id   = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Array, ConcreteTypePart::Void], true, 24, 8);
 
        parser.arch.slice_type_id   = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Slice, ConcreteTypePart::Void], true, 16, 4);
 
        parser.arch.input_type_id   = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Input, ConcreteTypePart::Void], true, 8, 8);
 
        parser.arch.output_type_id  = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Output, ConcreteTypePart::Void], true, 8, 8);
 
        parser.arch.pointer_type_id = insert_builtin_type(&mut parser.type_table, vec![ConcreteTypePart::Pointer, ConcreteTypePart::Void], true, 8, 8);
 

	
 
        // Insert builtin functions
 
        fn quick_type(variants: &[ParserTypeVariant]) -> ParserType {
 
            let mut t = ParserType{ elements: Vec::with_capacity(variants.len()), full_span: InputSpan::new() };
 
            for variant in variants {
 
                t.elements.push(ParserTypeElement{ element_span: InputSpan::new(), variant: variant.clone() });
 
            }
 
            t
 
        }
 

	
 
        use ParserTypeVariant as PTV;
 
        insert_builtin_function(&mut parser, "get", &["T"], |id| (
 
            vec![
 
                ("input", quick_type(&[PTV::Input, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::PolymorphicArgument(id.upcast(), 0)])
 
        ));
 
        insert_builtin_function(&mut parser, "put", &["T"], |id| (
 
            vec![
 
                ("output", quick_type(&[PTV::Output, PTV::PolymorphicArgument(id.upcast(), 0)])),
 
                ("value", quick_type(&[PTV::PolymorphicArgument(id.upcast(), 0)])),
 
            ],
 
            quick_type(&[PTV::Void])
 
        ));
 
        insert_builtin_function(&mut parser, "fires", &["T"], |id| (
 
            vec![
 
                ("port", quick_type(&[PTV::InputOrOutput, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::Bool])
 
        ));
 
        insert_builtin_function(&mut parser, "create", &["T"], |id| (
 
            vec![
 
                ("length", quick_type(&[PTV::IntegerLike]))
 
            ],
 
            quick_type(&[PTV::ArrayLike, PTV::PolymorphicArgument(id.upcast(), 0)])
 
        ));
 
        insert_builtin_function(&mut parser, "length", &["T"], |id| (
 
            vec![
 
                ("array", quick_type(&[PTV::ArrayLike, PTV::PolymorphicArgument(id.upcast(), 0)]))
 
            ],
 
            quick_type(&[PTV::UInt32]) // TODO: @PtrInt
 
        ));
 
        insert_builtin_function(&mut parser, "assert", &[], |_id| (
 
            vec![
 
                ("condition", quick_type(&[PTV::Bool])),
 
            ],
 
            quick_type(&[PTV::Void])
 
        ));
 
        insert_builtin_function(&mut parser, "print", &[], |_id| (
 
            vec![
 
                ("message", quick_type(&[PTV::String])),
 
            ],
 
            quick_type(&[PTV::Void])
 
        ));
 

	
 
        parser
 
    }
 

	
 
    pub fn feed(&mut self, mut source: InputSource) -> Result<(), ParseError> {
 
        let mut token_buffer = TokenBuffer::new();
 
        self.pass_tokenizer.tokenize(&mut source, &mut token_buffer)?;
 

	
 
        let module = Module{
 
            source,
 
            tokens: token_buffer,
 
            root_id: RootId::new_invalid(),
 
            name: None,
 
            version: None,
 
            phase: ModuleCompilationPhase::Tokenized,
 
        };
 
        self.modules.push(module);
 

	
 
        Ok(())
 
    }
 

	
 
    pub fn parse(&mut self) -> Result<(), ParseError> {
 
        let mut pass_ctx = PassCtx{
 
            heap: &mut self.heap,
 
            symbols: &mut self.symbol_table,
 
            pool: &mut self.string_pool,
 
            arch: &self.arch,
 
        };
 

	
 
        // Advance all modules to the phase where all symbols are scanned
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_symbols.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // With all symbols scanned, perform further compilation until we can
 
        // add all base types to the type table.
 
        for module_idx in 0..self.modules.len() {
 
            self.pass_import.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
            self.pass_definitions.parse(&mut self.modules, module_idx, &mut pass_ctx)?;
 
        }
 

	
 
        // Add every known type to the type table
 
        self.type_table.build_base_types(&mut self.modules, &mut pass_ctx)?;
 

	
 
        // Continue compilation with the remaining phases now that the types
 
        // are all in the type table
 
        for module_idx in 0..self.modules.len() {
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_validation.visit_module(&mut ctx)?;
 
        }
 

	
 
        // Perform typechecking on all modules
 
        let mut queue = ResolveQueue::new();
 
        for module_idx in 0..self.modules.len() {
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_typing.queue_module_definitions(&mut ctx, &mut queue);
 
        };
 
        while !queue.is_empty() {
 
-            let top = queue.pop().unwrap();
+            let top = queue.pop_front().unwrap();
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx: top.root_id.index as usize,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_typing.handle_module_definition(&mut ctx, &mut queue, top)?;
 
        }
 

	
 
        // Rewrite nodes in tree, then prepare for execution of code
 
        for module_idx in 0..self.modules.len() {
 
            self.modules[module_idx].phase = ModuleCompilationPhase::Typed;
 
            let mut ctx = visitor::Ctx{
 
                heap: &mut self.heap,
 
                modules: &mut self.modules,
 
                module_idx,
 
                symbols: &mut self.symbol_table,
 
                types: &mut self.type_table,
 
                arch: &self.arch,
 
            };
 
            self.pass_rewriting.visit_module(&mut ctx)?;
 
            self.pass_stack_size.visit_module(&mut ctx)?;
 
        }
 

	
 
        // Write out desired information
 
        if let Some(filename) = &self.write_ast_to {
 
            let mut writer = ASTWriter::new();
 
            let mut file = std::fs::File::create(std::path::Path::new(filename)).unwrap();
 
            writer.write_ast(&mut file, &self.heap);
 
        }
 

	
 
        Ok(())
 
    }
 
}
 

	
 
fn insert_builtin_type(type_table: &mut TypeTable, parts: Vec<ConcreteTypePart>, has_poly_var: bool, size: usize, alignment: usize) -> TypeId {
 
    const POLY_VARS: [PolymorphicVariable; 1] = [PolymorphicVariable{
 
        identifier: Identifier::new_empty(InputSpan::new()),
 
        is_in_use: false,
 
    }];
 

	
 
    let concrete_type = ConcreteType{ parts };
 
    let poly_var = if has_poly_var {
 
        POLY_VARS.as_slice()
 
    } else {
 
        &[]
 
    };
 

	
 
    return type_table.add_builtin_data_type(concrete_type, poly_var, size, alignment);
 
}
 

	
 
// Note: the args and return type are produced by a callback because the function ID needs to be known first.
 
fn insert_builtin_function<T: Fn(ProcedureDefinitionId) -> (Vec<(&'static str, ParserType)>, ParserType)> (
 
    p: &mut Parser, func_name: &str, polymorphic: &[&str], arg_and_return_fn: T
 
) {
 
    // Insert into AST (to get an ID), also prepare the polymorphic variables
 
    // we need later for the type table
 
    let mut ast_poly_vars = Vec::with_capacity(polymorphic.len());
 
    let mut type_poly_vars = Vec::with_capacity(polymorphic.len());
 
    for poly_var in polymorphic {
 
        let identifier = Identifier{ span: InputSpan::new(), value: p.string_pool.intern(poly_var.as_bytes()) } ;
 
        ast_poly_vars.push(identifier.clone());
 
        type_poly_vars.push(PolymorphicVariable{ identifier, is_in_use: false });
 
    }
 

	
 
    let func_ident_ref = p.string_pool.intern(func_name.as_bytes());
 
    let procedure_id = p.heap.alloc_procedure_definition(|this| ProcedureDefinition {
 
        this,
 
        defined_in: RootId::new_invalid(),
 
        builtin: true,
 
        kind: ProcedureKind::Function,
 
        span: InputSpan::new(),
 
        identifier: Identifier{ span: InputSpan::new(), value: func_ident_ref.clone() },
 
        poly_vars: ast_poly_vars,
 
        return_type: None,
 
        parameters: Vec::new(),
 
        scope: ScopeId::new_invalid(),
 
        body: BlockStatementId::new_invalid(),
 
        monomorphs: Vec::new(),
 
    });
 

	
 
    // Modify AST with more information about the procedure
 
    let (arguments, return_type) = arg_and_return_fn(procedure_id);
 

	
 
    let mut parameters = Vec::with_capacity(arguments.len());
 
    for (arg_name, arg_type) in arguments {
 
        let identifier = Identifier{ span: InputSpan::new(), value: p.string_pool.intern(arg_name.as_bytes()) };
 
        let param_id = p.heap.alloc_variable(|this| Variable{
 
            this,
 
            kind: VariableKind::Parameter,
 
            parser_type: arg_type.clone(),
 
            identifier,
 
            relative_pos_in_parent: 0,
 
            unique_id_in_scope: 0
 
        });
 
        parameters.push(param_id);
 
    }
 

	
 
    let func = &mut p.heap[procedure_id];
 
    func.parameters = parameters;
 
    func.return_type = Some(return_type);
 

	
 
    // Insert into symbol table
 
    p.symbol_table.insert_symbol(SymbolScope::Global, Symbol{
 
        name: func_ident_ref,
 
        variant: SymbolVariant::Definition(SymbolDefinition{
 
            defined_in_module: RootId::new_invalid(),
 
            defined_in_scope: SymbolScope::Global,
 
            definition_span: InputSpan::new(),
 
            identifier_span: InputSpan::new(),
 
            imported_at: None,
 
            class: DefinitionClass::Function,
 
            definition_id: procedure_id.upcast(),
 
        })
 
    }).unwrap();
 

	
 
    // Insert into type table
 
    let mut concrete_type = ConcreteType::default();
 
    concrete_type.parts.push(ConcreteTypePart::Function(procedure_id, type_poly_vars.len() as u32));
 

	
 
    for _ in 0..type_poly_vars.len() {
 
        concrete_type.parts.push(ConcreteTypePart::Void); // doesn't matter (I hope...)
 
    }
 
    p.type_table.add_builtin_procedure_type(concrete_type, &type_poly_vars);
 
    // let mut concrete_type = ConcreteType::default();
 
    // concrete_type.parts.push(ConcreteTypePart::Function(procedure_id, type_poly_vars.len() as u32));
 
    //
 
    // for _ in 0..type_poly_vars.len() {
 
    //     concrete_type.parts.push(ConcreteTypePart::Void); // doesn't matter (I hope...)
 
    // }
 
    // p.type_table.add_builtin_procedure_type(concrete_type, &type_poly_vars);
 
}
 
\ No newline at end of file
src/protocol/parser/pass_typing.rs
 
/// pass_typing
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure, insert an inference node for it, and determine its initial type based on context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter case we return an
 
/// error.
 
///
 
/// TODO: Needs a thorough rewrite:
 
///  0. polymorph_progress is intentionally broken at the moment. Make it work
 
///     again and use a normal VecSomething.
 
///  1. The foundation for doing all of the work with predetermined indices
 
///     instead of with HashMaps is there, but it is not really used because of
 
///     time constraints. When time is available, rewrite the system such that
 
///     AST IDs are not needed, and only indices into arrays are used.
 
///  2. Remove the `msg` type?
 
///  3. Disallow certain types in certain operations (e.g. `Void`).
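As an aside, the queue-and-progress scheme described in the comment above can be illustrated with a minimal, self-contained sketch (illustrative only, not part of this file or crate; the toy lattice and names `ToyType`/`unify` are assumptions): partially known types are repeatedly unified, and whenever a node becomes more specific, the constraints touching it are requeued until no more progress is made.

use std::collections::VecDeque;

#[derive(Clone, Copy, PartialEq, Debug)]
enum ToyType { Unknown, IntegerLike, SInt32, Bool }

// Returns the more specific of two compatible types, or None on a conflict.
fn unify(a: ToyType, b: ToyType) -> Option<ToyType> {
    use ToyType::*;
    match (a, b) {
        (x, y) if x == y => Some(x),
        (Unknown, x) | (x, Unknown) => Some(x),
        (IntegerLike, SInt32) | (SInt32, IntegerLike) => Some(SInt32),
        _ => None, // e.g. Bool vs SInt32: a genuine type error
    }
}

fn main() {
    // Three expression "nodes" and two equality constraints between them.
    let mut types = vec![ToyType::IntegerLike, ToyType::Unknown, ToyType::SInt32];
    let constraints = [(0usize, 1usize), (1, 2)];
    let mut queue: VecDeque<usize> = (0..constraints.len()).collect();

    while let Some(ci) = queue.pop_front() {
        let (a, b) = constraints[ci];
        let merged = unify(types[a], types[b]).expect("incompatible types");
        let progressed = types[a] != merged || types[b] != merged;
        types[a] = merged;
        types[b] = merged;
        if progressed {
            // A node became more specific: requeue every other constraint
            // that touches one of the two nodes involved.
            for (other, &(x, y)) in constraints.iter().enumerate() {
                if other != ci && (x == a || x == b || y == a || y == b) {
                    queue.push_back(other);
                }
            }
        }
    }
    println!("{:?}", types); // prints [SInt32, SInt32, SInt32]
}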
 

	
 
macro_rules! debug_log_enabled {
 
    () => { false };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "types", $format, $($args),*);
 
    };
 
}
 

	
 
use std::collections::VecDeque;
 

	
 
use crate::collections::{ScopedBuffer, ScopedSection, DequeSet};
 
use crate::protocol::ast::*;
 
use crate::protocol::input_source::ParseError;
 
use crate::protocol::parser::ModuleCompilationPhase;
 
use crate::protocol::parser::type_table::*;
 
use crate::protocol::parser::token_parsing::*;
 
use super::visitor::{
 
    BUFFER_INIT_CAP_LARGE,
 
    BUFFER_INIT_CAP_SMALL,
 
    Ctx,
 
};
 

	
 
// -----------------------------------------------------------------------------
 
// Inference type
 
// -----------------------------------------------------------------------------
 

	
 
const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 
const STRING_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::String, InferenceTypePart::Character ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const SLICE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Slice, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // When we infer types of AST elements that support polymorphic arguments,
 
    // then we might have the case that multiple embedded types depend on the
 
    // polymorphic type (e.g. func bla(T a, T[] b) -> T[][]). If we can infer
 
    // the type in one place (e.g. argument a), then we may propagate this
 
    // information to other types (e.g. argument b and the return type). For
 
    // this reason we place markers in the `InferenceType` instances such that
 
    // we know which part of the type was originally a polymorphic argument.
 
    Marker(u32),
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    UInt8,
 
    UInt16,
 
    UInt32,
 
    UInt64,
 
    SInt8,
 
    SInt16,
 
    SInt32,
 
    SInt64,
 
    Character,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // Tuple with any number of subtypes (for practical reasons 1 element is impossible)
 
    Tuple(u32),
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, u32)
 
}
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        match self {
 
            InferenceTypePart::Marker(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike |
 
            ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_arraylike(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::String | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_arraylike()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
 

	
 
    /// Checks if a part is more specific
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 |
 
            ITP::Character => {
 
                -1
 
            },
 
            ITP::Marker(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output | ITP::String => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Tuple(num) | ITP::Instance(_, num) => {
 
                (*num as i32) - 1
 
            }
 
        }
 
    }
 
}
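For intuition about the flat, pre-order part layout and the `depth_change` bookkeeping above, here is a small illustrative sketch (not part of this crate; `ToyPart`, `num_children`, and `subtree_end` are hypothetical names): the end of a subtree is found by starting a depth counter at one and adding `number_of_children - 1` for each visited part.

// Each part records how many child subtrees follow it in pre-order.
enum ToyPart { Array, SInt32, Tuple(u32) }

fn num_children(part: &ToyPart) -> i32 {
    match part {
        ToyPart::SInt32 => 0,
        ToyPart::Array => 1,
        ToyPart::Tuple(n) => *n as i32,
    }
}

// Returns the exclusive end index of the subtree rooted at `start`.
fn subtree_end(parts: &[ToyPart], start: usize) -> usize {
    let mut depth = 1i32; // one subtree (the root at `start`) is still owed
    let mut idx = start;
    while depth > 0 {
        depth += num_children(&parts[idx]) - 1; // the `depth_change()` idea
        idx += 1;
    }
    idx
}

fn main() {
    // The tuple type (s32[], s32) in pre-order: Tuple(2), Array, SInt32, SInt32
    let parts = [ToyPart::Tuple(2), ToyPart::Array, ToyPart::SInt32, ToyPart::SInt32];
    assert_eq!(subtree_end(&parts, 1), 3); // the array subtree spans parts[1..3]
    assert_eq!(subtree_end(&parts, 0), 4); // the whole tuple spans parts[0..4]
}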
 

	
 
#[derive(Debug, Clone)]
 
struct InferenceType {
 
    has_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    /// Generates a new InferenceType. The two boolean flags will be checked in
 
    /// debug mode.
 
    fn new(has_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        dbg_code!({
 
            debug_assert!(!parts.is_empty());
 
            let parts_body_marker = parts.iter().any(|v| v.is_marker());
 
            debug_assert_eq!(has_marker, parts_body_marker);
 
            let parts_done = parts.iter().all(|v| v.is_concrete());
 
            debug_assert_eq!(is_done, parts_done, "{:?}", parts);
 
        });
 
        Self{ has_marker, is_done, parts }
 
    }
 

	
 
    /// Replaces a type subtree with the provided subtree. The caller must make
 
    /// sure that the replacement is a well-formed type subtree.
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
        let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
@@ -631,710 +633,710 @@ impl InferenceType {
 
            },
 
            ITP::Unknown => buffer.push_str("?"),
 
            ITP::NumberLike => buffer.push_str("numberlike"),
 
            ITP::IntegerLike => buffer.push_str("integerlike"),
 
            ITP::ArrayLike => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[?]");
 
            },
 
            ITP::PortLike => {
 
                buffer.push_str("portlike<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            }
 
            ITP::Void => buffer.push_str("void"),
 
            ITP::Bool => buffer.push_str(KW_TYPE_BOOL_STR),
 
            ITP::UInt8 => buffer.push_str(KW_TYPE_UINT8_STR),
 
            ITP::UInt16 => buffer.push_str(KW_TYPE_UINT16_STR),
 
            ITP::UInt32 => buffer.push_str(KW_TYPE_UINT32_STR),
 
            ITP::UInt64 => buffer.push_str(KW_TYPE_UINT64_STR),
 
            ITP::SInt8 => buffer.push_str(KW_TYPE_SINT8_STR),
 
            ITP::SInt16 => buffer.push_str(KW_TYPE_SINT16_STR),
 
            ITP::SInt32 => buffer.push_str(KW_TYPE_SINT32_STR),
 
            ITP::SInt64 => buffer.push_str(KW_TYPE_SINT64_STR),
 
            ITP::Character => buffer.push_str(KW_TYPE_CHAR_STR),
 
            ITP::String => {
 
                buffer.push_str(KW_TYPE_STRING_STR);
 
                idx += 1; // skip the 'char' subtype
 
            },
 
            ITP::Message => {
 
                buffer.push_str(KW_TYPE_MESSAGE_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Array => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            ITP::Slice => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            ITP::Input => {
 
                buffer.push_str(KW_TYPE_IN_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Output => {
 
                buffer.push_str(KW_TYPE_OUT_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Tuple(num_sub) => {
 
                buffer.push('(');
 
                if *num_sub > 0 {
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                }
 
                buffer.push(')');
 
            }
 
            ITP::Instance(definition_id, num_sub) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(definition.identifier().value.as_str());
 
                if *num_sub > 0 {
 
                    buffer.push('<');
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            },
 
        }
 

	
 
        idx
 
    }
 

	
 
    /// Returns the display name of a (part of) the type tree. Will allocate a
 
    /// string.
 
    fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String {
 
        let mut buffer = String::with_capacity(parts.len() * 6);
 
        Self::write_display_name(&mut buffer, heap, parts, 0);
 
        buffer
 
    }
 

	
 
    /// Returns the display name of the full type tree. Will allocate a string.
 
    fn display_name(&self, heap: &Heap) -> String {
 
        Self::partial_display_name(heap, &self.parts)
 
    }
 
}
 

	
 
impl Default for InferenceType {
 
    fn default() -> Self {
 
        Self{
 
            has_marker: false,
 
            is_done: false,
 
            parts: Vec::new(),
 
        }
 
    }
 
}
 

	
 
/// Iterator over the subtrees that follow a marker in an `InferenceType`
 
/// instance. Returns immutable slices over the internal parts
 
struct InferenceTypeMarkerIter<'a> {
 
    parts: &'a [InferenceTypePart],
 
    idx: usize,
 
}
 

	
 
impl<'a> InferenceTypeMarkerIter<'a> {
 
    fn new(parts: &'a [InferenceTypePart]) -> Self {
 
        Self{ parts, idx: 0 }
 
    }
 
}
 

	
 
impl<'a> Iterator for InferenceTypeMarkerIter<'a> {
 
    type Item = (u32, &'a [InferenceTypePart]);
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Iterate until we find a marker
 
        while self.idx < self.parts.len() {
 
            if let InferenceTypePart::Marker(marker) = self.parts[self.idx] {
 
                // Found a marker, find the subtree end
 
                let start_idx = self.idx + 1;
 
                let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx);
 

	
 
                // Modify internal index, then return items
 
                self.idx = end_idx;
 
                return Some((marker, &self.parts[start_idx..end_idx]));
 
            }
 

	
 
            self.idx += 1;
 
        }
 

	
 
        None
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum DualInferenceResult {
 
    Neither,        // neither argument is clarified
 
    First,          // first argument is clarified using the second one
 
    Second,         // second argument is clarified using the first one
 
    Both,           // both arguments are clarified
 
    Incompatible,   // types are incompatible: programmer error
 
}
 

	
 
impl DualInferenceResult {
 
    fn modified_lhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_rhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum SingleInferenceResult {
 
    Unmodified,
 
    Modified,
 
    Incompatible
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// PassTyping - Public Interface
 
// -----------------------------------------------------------------------------
 

	
 
type InferNodeIndex = usize;
 
type PolyDataIndex = isize;
 
type VarDataIndex = usize;
 

	
 
pub(crate) struct ResolveQueueElement {
 
    // Note that using the `definition_id` and the `monomorph_idx` one may
 
    // query the type table for the full procedure type, thereby retrieving
 
    // the polymorphic arguments to the procedure.
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) reserved_type_id: TypeId,
 
    pub(crate) reserved_monomorph_index: u32,
 
}
 

	
 
-pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
+pub(crate) type ResolveQueue = VecDeque<ResolveQueueElement>;
 

	
 
struct InferenceNode {
 
    // filled in during type inference
 
    expr_type: InferenceType,               // result type from expression
 
    expr_id: ExpressionId,                  // expression that is evaluated
 
    inference_rule: InferenceRule,          // rule used to infer node type
 
    parent_index: Option<InferNodeIndex>,   // parent of inference node
 
    field_index: i32,                       // index of struct field or tuple member
 
    poly_data_index: PolyDataIndex,         // index to inference data for polymorphic types
 
    // filled in once type inference is done
 
    info_type_id: TypeId,
 
    info_variant: ExpressionInfoVariant,
 
}
 

	
 
impl InferenceNode {
 
    #[inline]
 
    fn as_expression_info(&self) -> ExpressionInfo {
 
        return ExpressionInfo {
 
            type_id: self.info_type_id,
 
            variant: self.info_variant
 
        }
 
    }
 
}
 

	
 
/// Inferencing rule to apply. Some of these are reasonably generic. Other ones
 
/// require so much custom logic that we'll not try to come up with an
 
/// abstraction.
 
enum InferenceRule {
 
    Noop,
 
    MonoTemplate(InferenceRuleTemplate),
 
    BiEqual(InferenceRuleBiEqual),
 
    TriEqualArgs(InferenceRuleTriEqualArgs),
 
    TriEqualAll(InferenceRuleTriEqualAll),
 
    Concatenate(InferenceRuleTwoArgs),
 
    IndexingExpr(InferenceRuleIndexingExpr),
 
    SlicingExpr(InferenceRuleSlicingExpr),
 
    SelectStructField(InferenceRuleSelectStructField),
 
    SelectTupleMember(InferenceRuleSelectTupleMember),
 
    LiteralStruct(InferenceRuleLiteralStruct),
 
    LiteralEnum,
 
    LiteralUnion(InferenceRuleLiteralUnion),
 
    LiteralArray(InferenceRuleLiteralArray),
 
    LiteralTuple(InferenceRuleLiteralTuple),
 
    CastExpr(InferenceRuleCastExpr),
 
    CallExpr(InferenceRuleCallExpr),
 
    VariableExpr(InferenceRuleVariableExpr),
 
}
 

	
 
impl InferenceRule {
 
    union_cast_method_impl!(as_mono_template, InferenceRuleTemplate, InferenceRule::MonoTemplate);
 
    union_cast_method_impl!(as_bi_equal, InferenceRuleBiEqual, InferenceRule::BiEqual);
 
    union_cast_method_impl!(as_tri_equal_args, InferenceRuleTriEqualArgs, InferenceRule::TriEqualArgs);
 
    union_cast_method_impl!(as_tri_equal_all, InferenceRuleTriEqualAll, InferenceRule::TriEqualAll);
 
    union_cast_method_impl!(as_concatenate, InferenceRuleTwoArgs, InferenceRule::Concatenate);
 
    union_cast_method_impl!(as_indexing_expr, InferenceRuleIndexingExpr, InferenceRule::IndexingExpr);
 
    union_cast_method_impl!(as_slicing_expr, InferenceRuleSlicingExpr, InferenceRule::SlicingExpr);
 
    union_cast_method_impl!(as_select_struct_field, InferenceRuleSelectStructField, InferenceRule::SelectStructField);
 
    union_cast_method_impl!(as_select_tuple_member, InferenceRuleSelectTupleMember, InferenceRule::SelectTupleMember);
 
    union_cast_method_impl!(as_literal_struct, InferenceRuleLiteralStruct, InferenceRule::LiteralStruct);
 
    union_cast_method_impl!(as_literal_union, InferenceRuleLiteralUnion, InferenceRule::LiteralUnion);
 
    union_cast_method_impl!(as_literal_array, InferenceRuleLiteralArray, InferenceRule::LiteralArray);
 
    union_cast_method_impl!(as_literal_tuple, InferenceRuleLiteralTuple, InferenceRule::LiteralTuple);
 
    union_cast_method_impl!(as_cast_expr, InferenceRuleCastExpr, InferenceRule::CastExpr);
 
    union_cast_method_impl!(as_call_expr, InferenceRuleCallExpr, InferenceRule::CallExpr);
 
    union_cast_method_impl!(as_variable_expr, InferenceRuleVariableExpr, InferenceRule::VariableExpr);
 
}
 

	
 
// Note: InferenceRuleTemplate is `Copy`, so don't add dynamically allocated
 
// members in the future (or review places where this struct is copied)
 
#[derive(Clone, Copy)]
 
struct InferenceRuleTemplate {
 
    template: &'static [InferenceTypePart],
 
    application: InferenceRuleTemplateApplication,
 
}
 

	
 
impl InferenceRuleTemplate {
 
    fn new_none() -> Self {
 
        return Self{
 
            template: &[],
 
            application: InferenceRuleTemplateApplication::None,
 
        };
 
    }
 

	
 
    fn new_forced(template: &'static [InferenceTypePart]) -> Self {
 
        return Self{
 
            template,
 
            application: InferenceRuleTemplateApplication::Forced,
 
        };
 
    }
 

	
 
    fn new_template(template: &'static [InferenceTypePart]) -> Self {
 
        return Self{
 
            template,
 
            application: InferenceRuleTemplateApplication::Template,
 
        }
 
    }
 
}
 

	
 
#[derive(Clone, Copy)]
 
enum InferenceRuleTemplateApplication {
 
    None, // do not apply template, silly, but saves some bytes
 
    Forced,
 
    Template,
 
}
 

	
 
/// Type equality applied to 'self' and the argument. An optional template will
 
/// be applied to 'self' first. Example: "bitwise not"
 
struct InferenceRuleBiEqual {
 
    template: InferenceRuleTemplate,
 
    argument_index: InferNodeIndex,
 
}
 

	
 
/// Type equality applied to two arguments. Template can be applied to 'self'
 
/// (generally forced, since this rule does not apply a type equality constraint
 
/// to 'self') and the two arguments. Example: "equality operator"
 
struct InferenceRuleTriEqualArgs {
 
    argument_template: InferenceRuleTemplate,
 
    result_template: InferenceRuleTemplate,
 
    argument1_index: InferNodeIndex,
 
    argument2_index: InferNodeIndex,
 
}
 

	
 
/// Type equality applied to 'self' and two arguments. Template may be
 
/// optionally applied to 'self'. Example: "addition operator"
 
struct InferenceRuleTriEqualAll {
 
    template: InferenceRuleTemplate,
 
    argument1_index: InferNodeIndex,
 
    argument2_index: InferNodeIndex,
 
}
 

	
 
/// Information for an inference rule that is applied to 'self' and two
 
/// arguments, see `InferenceRule` for its meaning.
 
struct InferenceRuleTwoArgs {
 
    argument1_index: InferNodeIndex,
 
    argument2_index: InferNodeIndex,
 
}
 

	
 
struct InferenceRuleIndexingExpr {
 
    subject_index: InferNodeIndex,
 
    index_index: InferNodeIndex,
 
}
 

	
 
struct InferenceRuleSlicingExpr {
 
    subject_index: InferNodeIndex,
 
    from_index: InferNodeIndex,
 
    to_index: InferNodeIndex,
 
}
 

	
 
struct InferenceRuleSelectStructField {
 
    subject_index: InferNodeIndex,
 
    selected_field: Identifier,
 
}
 

	
 
struct InferenceRuleSelectTupleMember {
 
    subject_index: InferNodeIndex,
 
    selected_index: u64,
 
}
 

	
 
struct InferenceRuleLiteralStruct {
 
    element_indices: Vec<InferNodeIndex>,
 
}
 

	
 
struct InferenceRuleLiteralUnion {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleLiteralArray {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleLiteralTuple {
 
    element_indices: Vec<InferNodeIndex>
 
}
 

	
 
struct InferenceRuleCastExpr {
 
    subject_index: InferNodeIndex,
 
}
 

	
 
struct InferenceRuleCallExpr {
 
    argument_indices: Vec<InferNodeIndex>
 
}
 

	
 
/// Data associated with a variable expression: an expression that reads the
 
/// value from a variable.
 
struct InferenceRuleVariableExpr {
 
    var_data_index: VarDataIndex, // shared variable information
 
}
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
 
pub(crate) struct PassTyping {
 
    // Current definition we're typechecking.
 
    reserved_type_id: TypeId,
 
    reserved_monomorph_index: u32,
 
    procedure_id: ProcedureDefinitionId,
 
    procedure_kind: ProcedureKind,
 
    poly_vars: Vec<ConcreteType>,
 
    // Temporary variables during construction of inference rules
 
    parent_index: Option<InferNodeIndex>,
 
    // Buffers for iteration over various types
 
    var_buffer: ScopedBuffer<VariableId>,
 
    expr_buffer: ScopedBuffer<ExpressionId>,
 
    stmt_buffer: ScopedBuffer<StatementId>,
 
    bool_buffer: ScopedBuffer<bool>,
 
    index_buffer: ScopedBuffer<usize>,
 
    definition_buffer: ScopedBuffer<DefinitionId>,
 
    poly_progress_buffer: ScopedBuffer<u32>,
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    infer_nodes: Vec<InferenceNode>,                     // will be transferred to type table at end
 
    poly_data: Vec<PolyData>,       // data for polymorph inference
 
    var_data: Vec<VarData>,
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progress on an associated type
 
    node_queued: DequeSet<InferNodeIndex>,
 
}
 

	
 
/// Generic struct that is used to store inferred types associated with
 
/// polymorphic types.
 
struct PolyData {
 
    first_rule_application: bool,
 
    definition_id: DefinitionId, // the definition, only used for user feedback
 
    /// Inferred types of the polymorphic variables as they are written down
 
    /// at the type's definition.
 
    poly_vars: Vec<InferenceType>,
 
    expr_types: PolyDataTypes,
 
}
 

	
 
// silly structure, just so we can use `PolyDataTypeIndex` ergonomically while
 
// making sure we're still capable of borrowing from `poly_vars`.
 
struct PolyDataTypes {
 
    /// Inferred types of associated types (e.g. struct fields, tuple members,
 
    /// function arguments). These types may depend on the polymorphic variables
 
    /// defined above.
 
    associated: Vec<InferenceType>,
 
    /// Inferred "returned" type (e.g. if a struct field is selected, then this
 
    /// contains the type of the selected field, for a function call it contains
 
    /// the return type). May depend on the polymorphic variables defined above.
 
    returned: InferenceType,
 
}
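// Illustrative example (editor's note, based on the builtin declared in
// parser/mod.rs): for a call to `get(some_port)`, where `get` behaves like
// `func get<T>(in<T> input) -> T`, the corresponding `PolyData` entry holds
// roughly:
//   poly_vars:             [T]      (inferred from all of the uses below)
//   expr_types.associated: [in<T>]  (the declared argument types)
//   expr_types.returned:   T        (the declared return type)
// Learning that the argument is an `in<u32>` progresses `poly_vars[0]` to
// `u32`, which in turn progresses `returned` (and thus the call expression's
// inferred type) to `u32`.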
 

	
 
#[derive(Clone, Copy)]
 
enum PolyDataTypeIndex {
 
    Associated(usize), // indexes into `PolyData.associated`
 
    Returned,
 
}
 

	
 
impl PolyDataTypes {
 
    fn get_type(&self, index: PolyDataTypeIndex) -> &InferenceType {
 
        match index {
 
            PolyDataTypeIndex::Associated(index) => return &self.associated[index],
 
            PolyDataTypeIndex::Returned => return &self.returned,
 
        }
 
    }
 

	
 
    fn get_type_mut(&mut self, index: PolyDataTypeIndex) -> &mut InferenceType {
 
        match index {
 
            PolyDataTypeIndex::Associated(index) => return &mut self.associated[index],
 
            PolyDataTypeIndex::Returned => return &mut self.returned,
 
        }
 
    }
 
}
 

	
 
struct VarData {
 
    var_id: VariableId,
 
    var_type: InferenceType,
 
    used_at: Vec<InferNodeIndex>, // of variable expressions
 
    linked_var: Option<VarDataIndex>,
 
}
 

	
 
impl PassTyping {
 
    pub(crate) fn new() -> Self {
 
        PassTyping {
 
            reserved_type_id: TypeId::new_invalid(),
 
            reserved_monomorph_index: u32::MAX,
 
            procedure_id: ProcedureDefinitionId::new_invalid(),
 
            procedure_kind: ProcedureKind::Function,
 
            poly_vars: Vec::new(),
 
            parent_index: None,
 
            var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            bool_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            index_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            definition_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            poly_progress_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            infer_nodes: Vec::with_capacity(BUFFER_INIT_CAP_LARGE),
 
            poly_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            var_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL),
 
            node_queued: DequeSet::new(),
 
        }
 
    }
 

	
 
    pub(crate) fn queue_module_definitions(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) {
 
        debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::ValidatedAndLinked);
 
        let root_id = ctx.module().root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        let definitions_section = self.definition_buffer.start_section_initialized(&root.definitions);
 

	
 
        for definition_id in definitions_section.iter_copied() {
 
            let definition = &ctx.heap[definition_id];
 

	
 
            let first_concrete_part_and_procedure_id = match definition {
 
                Definition::Procedure(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        if definition.kind == ProcedureKind::Function {
 
                            Some((ConcreteTypePart::Function(definition.this, 0), definition.this))
 
                        } else {
 
                            Some((ConcreteTypePart::Component(definition.this, 0), definition.this))
 
                        }
 
                    } else {
 
                        None
 
                    }
 
                }
 
                Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => None,
 
            };
 

	
 
            if let Some((first_concrete_part, procedure_id)) = first_concrete_part_and_procedure_id {
 
                let procedure = &mut ctx.heap[procedure_id];
 
                let monomorph_index = procedure.monomorphs.len() as u32;
 
                procedure.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid());
 

	
 
                let concrete_type = ConcreteType{ parts: vec![first_concrete_part] };
 
                let type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, concrete_type, monomorph_index);
 
-                queue.push(ResolveQueueElement{
+                queue.push_back(ResolveQueueElement{
 
                    root_id,
 
                    definition_id,
 
                    reserved_type_id: type_id,
 
                    reserved_monomorph_index: monomorph_index,
 
                })
 
            }
 
        }
 

	
 
        definitions_section.forget();
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        self.reset();
 
        debug_assert_eq!(ctx.module().root_id, element.root_id);
 
        debug_assert!(self.poly_vars.is_empty());
 

	
 
        // Prepare for visiting the definition
 
        self.reserved_type_id = element.reserved_type_id;
 
        self.reserved_monomorph_index = element.reserved_monomorph_index;
 

	
 
        let proc_base = ctx.types.get_base_definition(&element.definition_id).unwrap();
 
        if proc_base.is_polymorph {
 
            let monomorph = ctx.types.get_monomorph(element.reserved_type_id);
 
            for poly_arg in monomorph.concrete_type.embedded_iter(0) {
 
                self.poly_vars.push(ConcreteType{ parts: Vec::from(poly_arg) });
 
            }
 
        }
 

	
 
        // Visit the definition, setting up the type resolving process, then
 
        // (attempt to) resolve all types
 
        self.visit_definition(ctx, element.definition_id)?;
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.reserved_type_id = TypeId::new_invalid();
 
        self.procedure_id = ProcedureDefinitionId::new_invalid();
 
        self.procedure_kind = ProcedureKind::Function;
 
        self.poly_vars.clear();
 
        self.parent_index = None;
 

	
 
        self.infer_nodes.clear();
 
        self.poly_data.clear();
 
        self.var_data.clear();
 
        self.node_queued.clear();
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// PassTyping - Visitor-like implementation
 
// -----------------------------------------------------------------------------
 

	
 
type VisitorResult = Result<(), ParseError>;
 
type VisitExprResult = Result<InferNodeIndex, ParseError>;
 

	
 
impl PassTyping {
 
    // Definitions
 

	
 
    fn visit_definition(&mut self, ctx: &mut Ctx, id: DefinitionId) -> VisitorResult {
 
        return visitor_recursive_definition_impl!(self, &ctx.heap[id], ctx);
 
    }
 

	
 
    fn visit_enum_definition(&mut self, _: &mut Ctx, _: EnumDefinitionId) -> VisitorResult { return Ok(()) }
 
    fn visit_struct_definition(&mut self, _: &mut Ctx, _: StructDefinitionId) -> VisitorResult { return Ok(()) }
 
    fn visit_union_definition(&mut self, _: &mut Ctx, _: UnionDefinitionId) -> VisitorResult { return Ok(()) }
 

	
 
    fn visit_procedure_definition(&mut self, ctx: &mut Ctx, id: ProcedureDefinitionId) -> VisitorResult {
 
        let procedure_def = &ctx.heap[id];
 

	
 
        self.procedure_id = id;
 
        self.procedure_kind = procedure_def.kind;
 
        let body_id = procedure_def.body;
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting procedure: '{}' (id: {}, kind: {:?})", procedure_def.identifier.value.as_str(), id.0.index, procedure_def.kind);
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        // Visit parameters
 
        let section = self.var_buffer.start_section_initialized(procedure_def.parameters.as_slice());
 
        for param_id in section.iter_copied() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_data.push(VarData{
 
                var_id: param_id,
 
                var_type,
 
                used_at: Vec::new(),
 
                linked_var: None
 
            })
 
        }
 
        section.forget();
 

	
 
        // Visit all of the expressions within the body
 
        self.parent_index = None;
 
        return self.visit_block_stmt(ctx, body_id);
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_stmt(&mut self, ctx: &mut Ctx, id: StatementId) -> VisitorResult {
 
        return visitor_recursive_statement_impl!(self, &ctx.heap[id], ctx, Ok(()));
 
    }
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Transfer statements for traversal
 
        let block = &ctx.heap[id];
 

	
 
        let section = self.stmt_buffer.start_section_initialized(block.statements.as_slice());
 
        for stmt_id in section.iter_copied() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 
        section.forget();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_stmt(&mut self, ctx: &mut Ctx, id: LocalStatementId) -> VisitorResult {
 
        return visitor_recursive_local_impl!(self, &ctx.heap[id], ctx);
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 
        let initial_expr_id = memory_stmt.initial_expr;
 

	
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type_elements(&local.parser_type.elements, true);
 
        self.var_data.push(VarData{
 
            var_id: memory_stmt.variable,
 
            var_type,
 
            used_at: Vec::new(),
 
            linked_var: None,
 
        });
 

	
 
        // Process the initial value
 
        self.visit_assignment_expr(ctx, initial_expr_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_var_index = self.var_data.len() as VarDataIndex;
 
        let to_var_index = from_var_index + 1;
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type_elements(&from_local.parser_type.elements, true);
 
        self.var_data.push(VarData{
 
            var_id: channel_stmt.from,
 
            var_type: from_var_type,
 
            used_at: Vec::new(),
 
            linked_var: Some(to_var_index),
 
        });
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type_elements(&to_local.parser_type.elements, true);
 
        self.var_data.push(VarData{
 
            var_id: channel_stmt.to,
 
            var_type: to_var_type,
 
            used_at: Vec::new(),
 
            linked_var: Some(from_var_index),
 
        });
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_case = if_stmt.true_case;
 
        let false_body_case = if_stmt.false_case;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, true_body_case.body)?;
 
        if let Some(false_body_case) = false_body_case {
 
            self.visit_stmt(ctx, false_body_case.body)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
@@ -1882,415 +1884,425 @@ impl PassTyping {
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitExprResult {
 
        let upcast_id = id.upcast();
 
        let self_index = self.insert_initial_inference_node(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let old_parent = self.parent_index.replace(self_index);
 

	
 
        let declaration = &ctx.heap[var_expr.declaration.unwrap()];
 
        let mut var_data_index = None;
 
        for (index, var_data) in self.var_data.iter().enumerate() {
 
            if var_data.var_id == declaration.this {
 
                var_data_index = Some(index);
 
                break;
 
            }
 
        }
 

	
 
        let var_data_index = if let Some(var_data_index) = var_data_index {
 
            let var_data = &mut self.var_data[var_data_index];
 
            var_data.used_at.push(self_index);
 

	
 
            var_data_index
 
        } else {
 
            // If we're in a binding expression then it might be the first time we
 
            // encounter the variable, so add a `VarData` entry.
 
            debug_assert_eq!(declaration.kind, VariableKind::Binding);
 
            let var_type = self.determine_inference_type_from_parser_type_elements(
 
                &declaration.parser_type.elements, true
 
            );
 
            let var_data_index = self.var_data.len();
 
            self.var_data.push(VarData{
 
                var_id: declaration.this,
 
                var_type,
 
                used_at: vec![self_index],
 
                linked_var: None,
 
            });
 

	
 
            var_data_index
 
        };
 

	
 
        let node = &mut self.infer_nodes[self_index];
 
        node.inference_rule = InferenceRule::VariableExpr(InferenceRuleVariableExpr{
 
            var_data_index,
 
        });
 

	
 
        self.parent_index = old_parent;
 
        self.progress_inference_rule_variable_expr(ctx, self_index)?;
 
        return Ok(self_index);
 
    }
 
}
 

	
 
// -----------------------------------------------------------------------------
 
// PassTyping - Type-inference progression
 
// -----------------------------------------------------------------------------
 

	
 
impl PassTyping {
 
    #[allow(dead_code)] // used when debug flag at the top of this file is true.
 
    fn debug_get_display_name(&self, ctx: &Ctx, node_index: InferNodeIndex) -> String {
 
        let expr_type = &self.infer_nodes[node_index].expr_type;
 
        expr_type.display_name(&ctx.heap)
 
    }
 

	
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> {
 
        // Keep inferring until we can no longer make any progress
 
        while !self.node_queued.is_empty() {
 
            while !self.node_queued.is_empty() {
 
                let node_index = self.node_queued.pop_front().unwrap();
 
                self.progress_inference_rule(ctx, node_index)?;
 
            }
 

	
 
            // Nothing is queued anymore. However we might have integer literals
 
            // whose type cannot be inferred. For convenience's sake we'll
 
            // infer these to be s32.
 
            for (infer_node_index, infer_node) in self.infer_nodes.iter_mut().enumerate() {
 
                let expr_type = &mut infer_node.expr_type;
 
                if !expr_type.is_done && expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    // Force integer type to s32
 
                    expr_type.parts[0] = InferenceTypePart::SInt32;
 
                    expr_type.is_done = true;
 

	
 
                    // Requeue expression (and its parent, if it exists)
 
                    self.node_queued.push_back(infer_node_index);
 
                    if let Some(node_parent_index) = infer_node.parent_index {
 
                        self.node_queued.push_back(node_parent_index);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Helper for transferring polymorphic variables to concrete types and
 
        // checking if they're completely specified
 
        fn poly_data_type_to_concrete_type(
 
            ctx: &Ctx, expr_id: ExpressionId, inference_poly_args: &Vec<InferenceType>,
 
            first_concrete_part: ConcreteTypePart,
 
        ) -> Result<ConcreteType, ParseError> {
 
            // Prepare storage vector
 
            let mut num_inference_parts = 0;
 
            for inference_type in inference_poly_args {
 
                num_inference_parts += inference_type.parts.len();
 
            }
 

	
 
            let mut concrete_type = ConcreteType{
 
                parts: Vec::with_capacity(1 + num_inference_parts),
 
            };
 
            concrete_type.parts.push(first_concrete_part);
 

	
 
            // Go through all polymorphic arguments and add them to the concrete
 
            // types.
 
            for (poly_idx, poly_type) in inference_poly_args.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    let expr = &ctx.heap[expr_id];
 
                    let definition = match expr {
 
                        Expression::Call(expr) => expr.procedure.upcast(),
 
                        Expression::Literal(expr) => match &expr.value {
 
                            Literal::Enum(lit) => lit.definition,
 
                            Literal::Union(lit) => lit.definition,
 
                            Literal::Struct(lit) => lit.definition,
 
                            _ => unreachable!()
 
                        },
 
                        _ => unreachable!(),
 
                    };
 
                    let poly_vars = ctx.heap[definition].poly_vars();
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.operation_span(), format!(
 
                            "could not fully infer the type of polymorphic variable '{}' of this expression (got '{}')",
 
                            poly_vars[poly_idx].value.as_str(), poly_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 

	
 
                poly_type.write_concrete_type(&mut concrete_type);
 
            }
 

	
 
            Ok(concrete_type)
 
        }
 

	
 
        // Every expression has been checked and new monomorphs are queued. Transfer the
 
        // expression information to the AST. If this is the first time we're
 
        // visiting this procedure then we assign expression indices as well.
 
        let procedure = &ctx.heap[self.procedure_id];
 
        let num_infer_nodes = self.infer_nodes.len();
 
        let mut monomorph = ProcedureDefinitionMonomorph{
 
            argument_types: Vec::with_capacity(procedure.parameters.len()),
 
            expr_info: Vec::with_capacity(num_infer_nodes),
 
        };
 

	
 
        // For all of the expressions look up the TypeId (or create a new one).
 
        // For function calls and component instantiations figure out if they
 
        // need to be typechecked
 
        for infer_node in self.infer_nodes.iter_mut() {
 
            // Determine type ID
 
            let expr = &ctx.heap[infer_node.expr_id];
 

	
 
            // TODO: Maybe optimize? Split insertion up into lookup, then clone
 
            //  if needed?
 
            let mut concrete_type = ConcreteType::default();
 
            infer_node.expr_type.write_concrete_type(&mut concrete_type);
 
            let info_type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete_type)?;
 

	
 
            // Determine procedure type ID, i.e. a called/instantiated
 
            // procedure's signature.
 
            let info_variant = if let Expression::Call(expr) = expr {
 
                // Construct full function type. If not yet typechecked then
 
                // queue it for typechecking.
 
                let poly_data = &self.poly_data[infer_node.poly_data_index as usize];
 
                debug_assert!(expr.method.is_user_defined() || expr.method.is_public_builtin());
 
                let procedure_id = expr.procedure;
 
                let num_poly_vars = poly_data.poly_vars.len() as u32;
 

	
 
                let first_part = match expr.method {
 
                    Method::UserFunction => ConcreteTypePart::Function(procedure_id, num_poly_vars),
 
                    Method::UserComponent => ConcreteTypePart::Component(procedure_id, num_poly_vars),
 
                    _ => ConcreteTypePart::Function(procedure_id, num_poly_vars),
 
                };
 

	
 

	
 
                let definition_id = procedure_id.upcast();
 
                let signature_type = poly_data_type_to_concrete_type(
 
                    ctx, infer_node.expr_id, &poly_data.poly_vars, first_part
 
                )?;
 

	
 
                let (type_id, monomorph_index) = if let Some(type_id) = ctx.types.get_procedure_monomorph_type_id(&definition_id, &signature_type.parts) {
 
                    // Procedure is already typechecked
 
                    let monomorph_index = ctx.types.get_monomorph(type_id).variant.as_procedure().monomorph_index;
 
                    (type_id, monomorph_index)
 
                } else {
 
                    // Procedure is not yet typechecked, reserve a TypeID and a monomorph index
 
                    let procedure_to_check = &mut ctx.heap[procedure_id];
                    let procedure_is_builtin = procedure_to_check.builtin;
                    let monomorph_index = procedure_to_check.monomorphs.len() as u32;
                    procedure_to_check.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid());
                    let type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, signature_type, monomorph_index);

                    if !procedure_is_builtin {
                        // Only queue user-defined procedures for typechecking;
                        // builtin procedures just get their signature type
                        // reserved.
                        queue.push(ResolveQueueElement{
                            root_id: ctx.heap[definition_id].defined_in(),
                            definition_id,
                            reserved_type_id: type_id,
                            reserved_monomorph_index: monomorph_index,
                        });
                    }
 

	
 
                    (type_id, monomorph_index)
 
                };
 

	
 
                ExpressionInfoVariant::Procedure(type_id, monomorph_index)
 
            } else if let Expression::Select(_expr) = expr {
 
                ExpressionInfoVariant::Select(infer_node.field_index)
 
            } else {
 
                ExpressionInfoVariant::Generic
 
            };
 

	
 
            infer_node.info_type_id = info_type_id;
 
            infer_node.info_variant = info_variant;
 
        }
 

	
 
        // Write the types of the arguments
 
        let procedure = &ctx.heap[self.procedure_id];
 
        for parameter_id in procedure.parameters.iter().copied() {
 
            let mut concrete = ConcreteType::default();
 
            let var_data = self.var_data.iter().find(|v| v.var_id == parameter_id).unwrap();
 
            var_data.var_type.write_concrete_type(&mut concrete);
 
            let type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete)?;
 
            monomorph.argument_types.push(type_id)
 
        }
 

	
 
        println!("DEBUG: For procedure {} with polyargs {:#?}", ctx.heap[self.procedure_id].identifier.value.as_str(), self.poly_vars);
 
        for infer_node in self.infer_nodes.iter() {
 
            println!("DEBUG: [{:?}] has type: {}", infer_node.expr_id, infer_node.expr_type.display_name(&ctx.heap));
 
        }
 

	
 
        // Determine if we have already assigned type indices to the expressions
 
        // before (the indices that, for a monomorph, can retrieve the type of
 
        // the expression).
 
        let has_type_indices = self.reserved_monomorph_index > 0;
 
        if has_type_indices {
 
            // already have indices, so resize and then index into it
 
            debug_assert!(monomorph.expr_info.is_empty());
 
            monomorph.expr_info.resize(num_infer_nodes, ExpressionInfo::new_invalid());
 
            for infer_node in self.infer_nodes.iter() {
 
                let type_index = ctx.heap[infer_node.expr_id].type_index();
 
                monomorph.expr_info[type_index as usize] = infer_node.as_expression_info();
 
            }
 
        } else {
 
            // no indices yet, need to be assigned in AST
 
            for infer_node in self.infer_nodes.iter() {
 
                let type_index = monomorph.expr_info.len();
 
                monomorph.expr_info.push(infer_node.as_expression_info());
 
                *ctx.heap[infer_node.expr_id].type_index_mut() = type_index as i32;
 
            }
 
        }
 

	
 
        // Push the information into the AST
 
        let procedure = &mut ctx.heap[self.procedure_id];
 
        procedure.monomorphs[self.reserved_monomorph_index as usize] = monomorph;
 

	
 
        Ok(())
 
    }
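    // A minimal sketch of the control flow above, with hypothetical names and
    // plain std types: drain the worklist until no rule makes progress, then
    // default any still-ambiguous integer-like node to a signed 32-bit type
    // and requeue it so dependent rules can finish. Not used by the compiler.
    #[allow(dead_code)]
    fn sketch_integer_default_pass(mut queue: std::collections::VecDeque<usize>, types: &mut [Option<&'static str>]) {
        while !queue.is_empty() {
            while let Some(node_index) = queue.pop_front() {
                // Stand-in for `progress_inference_rule(ctx, node_index)`.
                let _ = node_index;
            }
            // Queue ran dry: force unresolved nodes to "s32" and requeue them.
            for (node_index, slot) in types.iter_mut().enumerate() {
                if slot.is_none() {
                    *slot = Some("s32");
                    queue.push_back(node_index);
                }
            }
        }
    }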
 

	
 
    fn progress_inference_rule(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        use InferenceRule as IR;
 

	
 
        let node = &self.infer_nodes[node_index];
 
        match &node.inference_rule {
 
            IR::Noop =>
 
                unreachable!(),
 
            IR::MonoTemplate(_) =>
 
                self.progress_inference_rule_mono_template(ctx, node_index),
 
            IR::BiEqual(_) =>
 
                self.progress_inference_rule_bi_equal(ctx, node_index),
 
            IR::TriEqualArgs(_) =>
 
                self.progress_inference_rule_tri_equal_args(ctx, node_index),
 
            IR::TriEqualAll(_) =>
 
                self.progress_inference_rule_tri_equal_all(ctx, node_index),
 
            IR::Concatenate(_) =>
 
                self.progress_inference_rule_concatenate(ctx, node_index),
 
            IR::IndexingExpr(_) =>
 
                self.progress_inference_rule_indexing_expr(ctx, node_index),
 
            IR::SlicingExpr(_) =>
 
                self.progress_inference_rule_slicing_expr(ctx, node_index),
 
            IR::SelectStructField(_) =>
 
                self.progress_inference_rule_select_struct_field(ctx, node_index),
 
            IR::SelectTupleMember(_) =>
 
                self.progress_inference_rule_select_tuple_member(ctx, node_index),
 
            IR::LiteralStruct(_) =>
 
                self.progress_inference_rule_literal_struct(ctx, node_index),
 
            IR::LiteralEnum =>
 
                self.progress_inference_rule_literal_enum(ctx, node_index),
 
            IR::LiteralUnion(_) =>
 
                self.progress_inference_rule_literal_union(ctx, node_index),
 
            IR::LiteralArray(_) =>
 
                self.progress_inference_rule_literal_array(ctx, node_index),
 
            IR::LiteralTuple(_) =>
 
                self.progress_inference_rule_literal_tuple(ctx, node_index),
 
            IR::CastExpr(_) =>
 
                self.progress_inference_rule_cast_expr(ctx, node_index),
 
            IR::CallExpr(_) =>
 
                self.progress_inference_rule_call_expr(ctx, node_index),
 
            IR::VariableExpr(_) =>
 
                self.progress_inference_rule_variable_expr(ctx, node_index),
 
        }
 
    }
 

	
 
    fn progress_inference_rule_mono_template(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = *node.inference_rule.as_mono_template();
 

	
 
        let progress = self.progress_template(ctx, node_index, rule.application, rule.template)?;
 
        if progress { self.queue_node_parent(node_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_bi_equal(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_bi_equal();
 
        let template = rule.template;
 
        let arg_index = rule.argument_index;
 

	
 
        let base_progress = self.progress_template(ctx, node_index, template.application, template.template)?;
 
        let (node_progress, arg_progress) = self.apply_equal2_constraint(ctx, node_index, node_index, 0, arg_index, 0)?;
 

	
 
        if base_progress || node_progress { self.queue_node_parent(node_index); }
 
        if arg_progress { self.queue_node(arg_index); }
 

	
 
        return Ok(())
 
    }
 

	
 
    fn progress_inference_rule_tri_equal_args(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_tri_equal_args();
 

	
 
        let result_template = rule.result_template;
 
        let argument_template = rule.argument_template;
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        let self_template_progress = self.progress_template(ctx, node_index, result_template.application, result_template.template)?;
 
        let arg1_template_progress = self.progress_template(ctx, arg1_index, argument_template.application, argument_template.template)?;
 
        let (arg1_progress, arg2_progress) = self.apply_equal2_constraint(ctx, node_index, arg1_index, 0, arg2_index, 0)?;
 

	
 
        if self_template_progress { self.queue_node_parent(node_index); }
 
        if arg1_template_progress || arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_tri_equal_all(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_tri_equal_all();
 

	
 
        let template = rule.template;
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        let template_progress = self.progress_template(ctx, node_index, template.application, template.template)?;
 
        let (node_progress, arg1_progress, arg2_progress) =
 
            self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)?;
 

	
 
        if template_progress || node_progress { self.queue_node_parent(node_index); }
 
        if arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn progress_inference_rule_concatenate(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_concatenate();
 
        let arg1_index = rule.argument1_index;
 
        let arg2_index = rule.argument2_index;
 

	
 
        // Two cases: one of the arguments is a string (then all must be), or
 
        // one of the arguments is an array (and all must be arrays).
 
        let (expr_is_str, expr_is_not_str) = self.type_is_certainly_or_certainly_not_string(node_index);
 
        let (arg1_is_str, arg1_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg1_index);
 
        let (arg2_is_str, arg2_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg2_index);
 

	
 
        let someone_is_str = expr_is_str || arg1_is_str || arg2_is_str;
 
        let someone_is_not_str = expr_is_not_str || arg1_is_not_str || arg2_is_not_str;
 
        // Note: this statement is an expression returning the progression bools
 
        let (node_progress, arg1_progress, arg2_progress) = if someone_is_str {
 
            // One of the arguments is a string, then all must be strings
 
            self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)?
 
        } else {
 
            let progress_expr = if someone_is_not_str {
 
                // Output must be a normal array
 
                self.apply_template_constraint(ctx, node_index, &ARRAY_TEMPLATE)?
 
            } else {
 
                // Output may still be anything
 
                self.apply_template_constraint(ctx, node_index, &ARRAYLIKE_TEMPLATE)?
 
            };
 

	
 
            let progress_arg1 = self.apply_template_constraint(ctx, arg1_index, &ARRAYLIKE_TEMPLATE)?;
 
            let progress_arg2 = self.apply_template_constraint(ctx, arg2_index, &ARRAYLIKE_TEMPLATE)?;
 

	
 
            // If they're all arraylike, then we want the subtype to match
 
            let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 1)?;
 

	
 
            (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
        };
 

	
 
        if node_progress { self.queue_node_parent(node_index); }
 
        if arg1_progress { self.queue_node(arg1_index); }
 
        if arg2_progress { self.queue_node(arg2_index); }
 

	
 
        return Ok(())
 
    }
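    // A minimal sketch of the decision made above, with hypothetical names:
    // if any of the three operands is certainly a string, the concatenation
    // forces all of them to be strings; otherwise they are only constrained
    // to be array-like with unified element types. Not used by the compiler.
    #[allow(dead_code)]
    fn sketch_concatenate_constraint(expr_is_str: bool, arg1_is_str: bool, arg2_is_str: bool) -> &'static str {
        if expr_is_str || arg1_is_str || arg2_is_str {
            "equate all three types (strings)"
        } else {
            "constrain all three to array-like, then equate the element types"
        }
    }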
 

	
 
    fn progress_inference_rule_indexing_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> {
 
        let node = &self.infer_nodes[node_index];
 
        let rule = node.inference_rule.as_indexing_expr();
 
        let subject_index = rule.subject_index;
 
        let index_index = rule.index_index; // infer node of the index operand
 

	
 
        // Subject is arraylike, index is integerlike
 
        let subject_template_progress = self.apply_template_constraint(ctx, subject_index, &ARRAYLIKE_TEMPLATE)?;
 
        let index_template_progress = self.apply_template_constraint(ctx, index_index, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // If subject is type `Array<T>`, then expr type is `T`
 
        let (node_progress, subject_progress) =
src/protocol/parser/type_table.rs
 
@@ -287,385 +287,384 @@ pub struct UnionMonomorphEmbedded {
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// Procedure monomorph (covering functions and all component variants). Also
/// stores the expression type data from the typechecking/inference pass.
 
pub struct ProcedureMonomorph {
 
    pub monomorph_index: u32,
 
    pub builtin: bool,
 
}
 

	
 
/// Tuple monomorph. Again somewhat of an exception: one cannot define a named
/// tuple type containing explicit polymorphic variables, but we still need to
/// store size/offset/alignment information, so we do it here.
 
pub struct TupleMonomorph {
 
    pub members: Vec<TupleMonomorphMember>
 
}
 

	
 
pub struct TupleMonomorphMember {
 
    pub type_id: TypeId,
 
    concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// Generic unique type ID. Every monomorphed type and every non-polymorphic
 
/// type will have one of these associated with it.
 
#[derive(Debug, Clone, Copy, PartialEq)]
 
pub struct TypeId(i64);
 

	
 
impl TypeId {
 
    pub(crate) fn new_invalid() -> Self {
 
        return Self(-1);
 
    }
 
}
 

	
 
/// A monomorphed (or non-polymorphic) type's memory layout, plus information
/// about its associated types (like a struct's field types).
 
pub struct MonoType {
 
    pub type_id: TypeId,
 
    pub concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub(crate) variant: MonoTypeVariant
 
}
 

	
 
impl MonoType {
 
    #[inline]
 
    fn new_empty(type_id: TypeId, concrete_type: ConcreteType, variant: MonoTypeVariant) -> Self {
 
        return Self {
 
            type_id, concrete_type,
 
            size: 0,
 
            alignment: 0,
 
            variant,
 
        }
 
    }
 

	
 
    /// Little internal helper function as a reminder: if alignment is 0, then
 
    /// the size/alignment are not actually computed yet!
 
    #[inline]
 
    fn get_size_alignment(&self) -> Option<(usize, usize)> {
 
        if self.alignment == 0 {
 
            return None
 
        } else {
 
            return Some((self.size, self.alignment));
 
        }
 
    }
 
}
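// A minimal sketch (hypothetical names, not part of the type table): a real
// alignment is always at least 1, so `get_size_alignment` above can use
// alignment == 0 as the sentinel for "layout not computed yet" and expose the
// result as an Option. The toy test shows the same accessor pattern.
#[cfg(test)]
mod sketch_layout_sentinel {
    struct ToyLayout { size: usize, alignment: usize }

    impl ToyLayout {
        fn get_size_alignment(&self) -> Option<(usize, usize)> {
            if self.alignment == 0 { None } else { Some((self.size, self.alignment)) }
        }
    }

    #[test]
    fn zero_alignment_means_not_computed() {
        assert!(ToyLayout{ size: 0, alignment: 0 }.get_size_alignment().is_none());
        assert_eq!(ToyLayout{ size: 8, alignment: 4 }.get_size_alignment(), Some((8, 4)));
    }
}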
 

	
 
/// Special structure that acts like the lookup key for `ConcreteType` instances
 
/// that have already been added to the type table before.
 
#[derive(Clone)]
 
struct MonoSearchKey {
 
    // Each part carries bitflags that record whether it should be checked when
    // comparing two search keys, plus a "change bit" marking where one
    // subtype's parts end and the next subtype's begin. Such a scheme is
    // needed to accommodate tuples.
 
    parts: Vec<(u8, ConcreteTypePart)>,
 
    change_bit: u8,
 
}
 

	
 
impl MonoSearchKey {
 
    const KEY_IN_USE: u8 = 0x01;
 
    const KEY_CHANGE_BIT: u8 = 0x02;
 

	
 
    fn with_capacity(capacity: usize) -> Self {
 
        return MonoSearchKey{
 
            parts: Vec::with_capacity(capacity),
 
            change_bit: 0,
 
        };
 
    }
 

	
 
    /// Sets the search key based on a single concrete type and its polymorphic
 
    /// variables.
 
    fn set(&mut self, concrete_type_parts: &[ConcreteTypePart], poly_var_in_use: &[PolymorphicVariable]) {
 
        self.set_top_type(concrete_type_parts[0]);
 

	
 
        let mut poly_var_index = 0;
 
        for subtype in ConcreteTypeIter::new(concrete_type_parts, 0) {
 
            let in_use = poly_var_in_use[poly_var_index].is_in_use;
 
            poly_var_index += 1;
 
            self.push_subtype(subtype, in_use);
 
        }
 

	
 
        debug_assert_eq!(poly_var_index, poly_var_in_use.len());
 
    }
 

	
 
    /// Starts setting the search key based on an initial top-level type. The
    /// caller must then call `push_subtype` the appropriate number of times
    /// after calling this function.
 
    fn set_top_type(&mut self, type_part: ConcreteTypePart) {
 
        self.parts.clear();
 
        self.parts.push((Self::KEY_IN_USE, type_part));
 
        self.change_bit = Self::KEY_CHANGE_BIT;
 
    }
 

	
 
    fn push_subtype(&mut self, concrete_type: &[ConcreteTypePart], in_use: bool) {
 
        let flag = self.change_bit | (if in_use { Self::KEY_IN_USE } else { 0 });
 

	
 
        for part in concrete_type {
 
            self.parts.push((flag, *part));
 
        }
 
        self.change_bit ^= Self::KEY_CHANGE_BIT;
 
    }
 

	
 
    fn push_subtree(&mut self, concrete_type: &[ConcreteTypePart], poly_var_in_use: &[PolymorphicVariable]) {
 
        self.parts.push((self.change_bit | Self::KEY_IN_USE, concrete_type[0]));
 
        self.change_bit ^= Self::KEY_CHANGE_BIT;
 

	
 
        let mut poly_var_index = 0;
 
        for subtype in ConcreteTypeIter::new(concrete_type, 0) {
 
            let in_use = poly_var_in_use[poly_var_index].is_in_use;
 
            poly_var_index += 1;
 
            self.push_subtype(subtype, in_use);
 
        }
 

	
 
        debug_assert_eq!(poly_var_index, poly_var_in_use.len());
 
    }
 

	
 
    // Utilities for hashing and comparison
 
    fn find_end_index(&self, start_index: usize) -> usize {
 
        // Check if we're already at the end
 
        let mut index = start_index;
 
        if index >= self.parts.len() {
 
            return index;
 
        }
 

	
 
        // Iterate until bit flips, or until at end
 
        let expected_bit = self.parts[index].0 & Self::KEY_CHANGE_BIT;
 

	
 
        index += 1;
 
        while index < self.parts.len() {
 
            let current_bit = self.parts[index].0 & Self::KEY_CHANGE_BIT;
 
            if current_bit != expected_bit {
 
                return index;
 
            }
 

	
 
            index += 1;
 
        }
 

	
 
        return index;
 
    }
 
}
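// A minimal sketch (hypothetical names, not part of the type table): each key
// part carries a "change bit" that alternates between consecutive subtypes, so
// `find_end_index` above only has to walk forward until that bit flips to find
// where the current subtype's run of parts ends. The toy test applies the same
// idea to a plain slice of flag bytes.
#[cfg(test)]
mod sketch_change_bit_runs {
    const CHANGE_BIT: u8 = 0x02;

    fn find_end_index(flags: &[u8], start_index: usize) -> usize {
        let mut index = start_index;
        if index >= flags.len() { return index; }
        let expected_bit = flags[index] & CHANGE_BIT;
        index += 1;
        while index < flags.len() && (flags[index] & CHANGE_BIT) == expected_bit {
            index += 1;
        }
        index
    }

    #[test]
    fn runs_end_where_the_bit_flips() {
        // Two parts belonging to one subtype, then three belonging to the next.
        let flags = [0x00, 0x00, 0x02, 0x02, 0x02];
        assert_eq!(find_end_index(&flags, 0), 2);
        assert_eq!(find_end_index(&flags, 2), 5);
    }
}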
 

	
 
impl Hash for MonoSearchKey {
 
    fn hash<H: Hasher>(&self, state: &mut H) {
 
        for index in 0..self.parts.len() {
 
            let (flags, part) = self.parts[index];
 
            if flags & Self::KEY_IN_USE != 0 {
 
                part.hash(state);
 
            }
 
        }
 
    }
 
}
 

	
 
impl PartialEq for MonoSearchKey {
 
    fn eq(&self, other: &Self) -> bool {
 
        let mut self_index = 0;
 
        let mut other_index = 0;
 

	
 
        while self_index < self.parts.len() && other_index < other.parts.len() {
 
            // Retrieve part and flags
 
            let (self_bits, _) = self.parts[self_index];
 
            let (other_bits, _) = other.parts[other_index];
 
            let self_in_use = (self_bits & Self::KEY_IN_USE) != 0;
 
            let other_in_use = (other_bits & Self::KEY_IN_USE) != 0;
 

	
 
            // Determine ending indices
 
            let self_end_index = self.find_end_index(self_index);
 
            let other_end_index = other.find_end_index(other_index);
 

	
 

	
 
            if self_in_use == other_in_use {
 
                if self_in_use {
 
                    // Both are in use, so both parts should be equal
 
                    let delta_self = self_end_index - self_index;
 
                    let delta_other = other_end_index - other_index;
 
                    if delta_self != delta_other {
 
                        // Both in use, but not of equal length, so the types
 
                        // cannot match
 
                        return false;
 
                    }
 

	
 
                    for _ in 0..delta_self {
 
                        let (_, self_part) = self.parts[self_index];
 
                        let (_, other_part) = other.parts[other_index];
 

	
 
                        if self_part != other_part {
 
                            return false;
 
                        }
 

	
 
                        self_index += 1;
 
                        other_index += 1;
 
                    }
 
                } else {
 
                    // Both not in use, so skip associated parts
 
                    self_index = self_end_index;
 
                    other_index = other_end_index;
 
                }
 
            } else {
 
                // The keys disagree on which parts should be checked; this
                // should never happen in practice.
 
                unreachable!();
 
            }
 
        }
 

	
 
        // Everything matched, so if we're at the end of both arrays then we're
 
        // certain that the two keys are equal.
 
        return self_index == self.parts.len() && other_index == other.parts.len();
 
    }
 
}
 

	
 
impl Eq for MonoSearchKey{}
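// A minimal sketch (hypothetical names, not part of the type table): since the
// `PartialEq` impl above ignores parts whose "in use" flag is cleared, the
// `Hash` impl must ignore them too, otherwise two keys that compare equal
// could hash differently and miss each other in the lookup map. The toy test
// checks that property for a simplified key.
#[cfg(test)]
mod sketch_key_hash_eq_consistency {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn hash_in_use_parts(parts: &[(bool, u32)]) -> u64 {
        let mut hasher = DefaultHasher::new();
        for (in_use, part) in parts {
            if *in_use { part.hash(&mut hasher); }
        }
        hasher.finish()
    }

    #[test]
    fn unused_parts_do_not_affect_the_hash() {
        // The second part differs, but it is flagged as not in use.
        let key_a = [(true, 1), (false, 7), (true, 3)];
        let key_b = [(true, 1), (false, 9), (true, 3)];
        assert_eq!(hash_in_use_parts(&key_a), hash_in_use_parts(&key_b));
    }
}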
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
const POLY_VARS_IN_USE: [PolymorphicVariable; 1] = [PolymorphicVariable{ identifier: Identifier::new_empty(InputSpan::new()), is_in_use: true }];
 

	
 
// Programmer note: keep this struct free of dynamically allocated memory
 
#[derive(Clone)]
 
struct TypeLoopBreadcrumb {
 
    type_id: TypeId,
 
    next_member: u32,
 
    next_embedded: u32, // for unions, the index into the variant's embedded types
 
}
 

	
 
// Programmer note: keep this struct free of dynamically allocated memory
 
#[derive(Clone)]
 
struct MemoryBreadcrumb {
 
    type_id: TypeId,
 
    next_member: u32,
 
    next_embedded: u32,
 
    first_size_alignment_idx: u32,
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum TypeLoopResult {
 
    TypeExists,
 
    PushBreadcrumb(DefinitionId, ConcreteType),
 
    TypeLoop(usize), // index into vec of breadcrumbs at which the type matched
 
}
 

	
 
enum MemoryLayoutResult {
 
    TypeExists(usize, usize), // (size, alignment)
 
    PushBreadcrumb(MemoryBreadcrumb),
 
}
 

	
 
// TODO: @Optimize, initial memory-unoptimized implementation
 
struct TypeLoopEntry {
 
    type_id: TypeId,
 
    is_union: bool,
 
}
 

	
 
struct TypeLoop {
 
    members: Vec<TypeLoopEntry>,
 
}
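// A minimal sketch (hypothetical names, not part of the type table): the
// breadcrumbs act as a DFS stack while member types are expanded, and running
// into a type that is already on the stack means the members form a type loop;
// the position of the matching breadcrumb is what `TypeLoopResult::TypeLoop`
// reports. The toy test shows the stack check.
#[cfg(test)]
mod sketch_type_loop_breadcrumbs {
    fn find_loop_start(breadcrumb_type_ids: &[u32], candidate: u32) -> Option<usize> {
        breadcrumb_type_ids.iter().position(|type_id| *type_id == candidate)
    }

    #[test]
    fn revisiting_a_breadcrumb_signals_a_loop() {
        let breadcrumbs = [10, 11, 12]; // types currently being expanded
        assert_eq!(find_loop_start(&breadcrumbs, 11), Some(1));
        assert_eq!(find_loop_start(&breadcrumbs, 13), None);
    }
}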
 

	
 
type DefinitionMap = HashMap<DefinitionId, DefinedType>;
 
type MonoTypeMap = HashMap<MonoSearchKey, TypeId>;
 
type MonoTypeArray = Vec<MonoType>;
 

	
 
pub struct TypeTable {
 
    // Lookup from AST DefinitionId to a defined type, and a lookup from
    // concrete (monomorphed) type to its TypeId.
 
    pub(crate) definition_lookup: DefinitionMap,
 
    mono_type_lookup: MonoTypeMap,
 
    pub(crate) mono_types: MonoTypeArray,
 
    mono_search_key: MonoSearchKey,
 
    // Breadcrumbs left behind while trying to find type loops. Also used to
 
    // determine sizes of types when all type loops are detected.
 
    type_loop_breadcrumbs: Vec<TypeLoopBreadcrumb>,
 
    type_loops: Vec<TypeLoop>,
 
    // Stores all encountered types during type loop detection. Used afterwards
 
    // to iterate over all types in order to compute size/alignment.
 
    encountered_types: Vec<TypeLoopEntry>,
 
    // Breadcrumbs and temporary storage during memory layout computation.
 
    memory_layout_breadcrumbs: Vec<MemoryBreadcrumb>,
 
    size_alignment_stack: Vec<(usize, usize)>,
 
}
 

	
 
impl TypeTable {
 
    /// Construct a new type table without any resolved types.
 
    pub(crate) fn new() -> Self {
 
        Self{ 
 
            definition_lookup: HashMap::with_capacity(128),
 
            mono_type_lookup: HashMap::with_capacity(128),
 
            mono_types: Vec::with_capacity(128),
 
            mono_search_key: MonoSearchKey::with_capacity(32),
 
            type_loop_breadcrumbs: Vec::with_capacity(32),
 
            type_loops: Vec::with_capacity(8),
 
            encountered_types: Vec::with_capacity(32),
 
            memory_layout_breadcrumbs: Vec::with_capacity(32),
 
            size_alignment_stack: Vec::with_capacity(64),
 
        }
 
    }
 

	
 
    /// Iterates over all defined types (polymorphic and non-polymorphic) and
 
    /// add their types in two passes. In the first pass we will just add the
 
    /// base types (we will not consider monomorphs, and we will not compute
 
    /// byte sizes). In the second pass we will compute byte sizes of
 
    /// non-polymorphic types, and potentially the monomorphs that are embedded
 
    /// in those types.
 
    pub(crate) fn build_base_types(&mut self, modules: &mut [Module], ctx: &mut PassCtx) -> Result<(), ParseError> {
 
        // Make sure we're allowed to cast root_id to index into ctx.modules
 
        debug_assert!(modules.iter().all(|m| m.phase >= ModuleCompilationPhase::DefinitionsParsed));
 
        debug_assert!(self.definition_lookup.is_empty());
 

	
 
        dbg_code!({
 
            for (index, module) in modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        });
 

	
 
        // Use context to guess hashmap size of the base types
 
        let reserve_size = ctx.heap.definitions.len();
 
        self.definition_lookup.reserve(reserve_size);
 

	
 
        // Resolve all base types
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let definition = &ctx.heap[definition_id];
 

	
 
            match definition {
 
                Definition::Enum(_) => self.build_base_enum_definition(modules, ctx, definition_id)?,
 
                Definition::Union(_) => self.build_base_union_definition(modules, ctx, definition_id)?,
 
                Definition::Struct(_) => self.build_base_struct_definition(modules, ctx, definition_id)?,
 
                Definition::Procedure(_) => self.build_base_procedure_definition(modules, ctx, definition_id)?,
 
            }
 
        }
 

	
 
        debug_assert_eq!(self.definition_lookup.len(), reserve_size, "mismatch in reserved size of type table");
 
        for module in modules.iter_mut() {
 
            module.phase = ModuleCompilationPhase::TypesAddedToTable;
 
        }
 

	
 
        // Go through all types again, lay out all types that are not
 
        // polymorphic. This might cause us to lay out monomorphized polymorphs
 
        // if these were member types of non-polymorphic types.
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let poly_type = self.definition_lookup.get(&definition_id).unwrap();
 

	
 
            if !poly_type.definition.is_data_type() || !poly_type.poly_vars.is_empty() {
 
                continue;
 
            }
 

	
 
            // If here then the type is a data type without polymorphic
 
            // variables, but we might have instantiated it already, so:
 
            let concrete_parts = [ConcreteTypePart::Instance(definition_id, 0)];
 
            self.mono_search_key.set(&concrete_parts, &[]);
 
            let type_id = self.mono_type_lookup.get(&self.mono_search_key);
 
            if type_id.is_none() {
 
                self.detect_and_resolve_type_loops_for(
 
                    modules, ctx.heap, ctx.arch,
 
                    ConcreteType{
 
                        parts: vec![ConcreteTypePart::Instance(definition_id, 0)]
 
                    },
 
                )?;
 
                self.lay_out_memory_for_encountered_types(ctx.arch);
 
            }
 
        }
 

	