Changeset - d8148185e205
MH - 2021-03-24 15:17:42
contact@maxhenger.nl
finished initial type inferencer/checker, tests work again
7 files changed:
src/protocol/ast.rs
 
@@ -611,296 +611,311 @@ pub struct ImportSymbols {
 
    // if symbols is empty, then we implicitly import all symbols without any
 
    // aliases for them. If it is not empty, then symbols are explicitly
 
    // specified, and optionally given an alias.
 
    pub symbols: Vec<AliasedSymbol>,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct Identifier {
 
    pub position: InputPosition,
 
    pub value: Vec<u8>
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct NamespacedIdentifier {
 
    pub position: InputPosition,
 
    pub num_namespaces: u8,
 
    pub value: Vec<u8>,
 
}
 

	
 
impl NamespacedIdentifier {
 
    pub(crate) fn iter(&self) -> NamespacedIdentifierIter {
 
        NamespacedIdentifierIter{
 
            value: &self.value,
 
            cur_offset: 0,
 
            num_returned: 0,
 
            num_total: self.num_namespaces
 
        }
 
    }
 
}
 

	
 
impl PartialEq for NamespacedIdentifier {
 
    fn eq(&self, other: &Self) -> bool {
 
        return self.value == other.value
 
    }
 
}
 
impl Eq for NamespacedIdentifier{}
 

	
 
// TODO: Just keep ref to NamespacedIdentifier
 
pub(crate) struct NamespacedIdentifierIter<'a> {
 
    value: &'a Vec<u8>,
 
    cur_offset: usize,
 
    num_returned: u8,
 
    num_total: u8,
 
}
 

	
 
impl<'a> NamespacedIdentifierIter<'a> {
 
    pub(crate) fn num_returned(&self) -> u8 {
 
        return self.num_returned;
 
    }
 
    pub(crate) fn num_remaining(&self) -> u8 {
 
        return self.num_total - self.num_returned
 
    }
 
    pub(crate) fn returned_section(&self) -> &[u8] {
 
        // Offset always includes the two trailing ':' characters
 
        let end = if self.cur_offset >= 2 { self.cur_offset - 2 } else { self.cur_offset };
 
        return &self.value[..end]
 
    }
 
}
 

	
 
impl<'a> Iterator for NamespacedIdentifierIter<'a> {
 
    type Item = &'a [u8];
 
    fn next(&mut self) -> Option<Self::Item> {
 
        if self.cur_offset >= self.value.len() {
 
            debug_assert_eq!(self.num_returned, self.num_total);
 
            None
 
        } else {
 
            debug_assert!(self.num_returned < self.num_total);
 
            let start = self.cur_offset;
 
            let mut end = start;
 
            while end < self.value.len() - 1 {
 
                if self.value[end] == b':' && self.value[end + 1] == b':' {
 
                    self.cur_offset = end + 2;
 
                    self.num_returned += 1;
 
                    return Some(&self.value[start..end]);
 
                }
 
                end += 1;
 
            }
 

	
 
            // If the NamespacedIdentifier is constructed properly, then the
            // value cannot end with "::", so this is the last section.
 
            debug_assert!(end == 0 || (self.value[end - 1] != b':' && self.value[end] != b':'));
 
            debug_assert_eq!(self.num_returned + 1, self.num_total);
 
            self.cur_offset = self.value.len();
 
            self.num_returned += 1;
 
            return Some(&self.value[start..]);
 
        }
 
    }
 
}
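// Illustrative sketch (not part of this changeset): the iterator above walks a
// buffer such as b"std::queue::create" and yields b"std", b"queue", b"create"
// by cutting at every "::" separator. A standalone helper with the same
// observable behaviour for well-formed input:
#[cfg(test)]
fn split_namespaced(value: &[u8]) -> Vec<&[u8]> {
    // A single ':' never occurs in a well-formed identifier, so splitting on
    // ':' and dropping the empty chunk between the two colons is equivalent.
    value.split(|b| *b == b':').filter(|part| !part.is_empty()).collect()
}

#[test]
fn namespaced_identifier_iter_example() {
    let parts = split_namespaced(b"std::queue::create");
    assert_eq!(parts, vec![&b"std"[..], &b"queue"[..], &b"create"[..]]);
}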
 

	
 
impl Display for Identifier {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        // A source identifier is in ASCII range.
 
        write!(f, "{}", String::from_utf8_lossy(&self.value))
 
    }
 
}
 

	
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum ParserTypeVariant {
 
    // Basic builtin
 
    Message,
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // Literals (need to get concrete builtin type during typechecking)
 
    IntegerLiteral,
 
    Inferred,
 
    // Complex builtins
 
    Array(ParserTypeId), // array of a type
 
    Input(ParserTypeId), // typed input endpoint of a channel
 
    Output(ParserTypeId), // typed output endpoint of a channel
 
    Symbolic(SymbolicParserType), // symbolic type (definition or polyarg)
 
}
 

	
 
impl ParserTypeVariant {
 
    pub(crate) fn supports_polymorphic_args(&self) -> bool {
 
        use ParserTypeVariant::*;
 
        match self {
 
            Message | Bool | Byte | Short | Int | Long | String | IntegerLiteral | Inferred => false,
 
            _ => true
 
        }
 
    }
 
}
 

	
 
/// ParserType is a specification of a type during the parsing phase and initial
 
/// linker/validator phase of the compilation process. These types may be
 
/// (partially) inferred or represent literals (e.g. an integer whose byte size is
 
/// not yet determined).
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ParserType {
 
    pub this: ParserTypeId,
 
    pub pos: InputPosition,
 
    pub variant: ParserTypeVariant,
 
}
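// Example (illustrative; `auto` is assumed to be the surface syntax for an
// inferred type): a declaration written with `auto` gets
// ParserTypeVariant::Inferred, and an untyped integer constant gets
// ParserTypeVariant::IntegerLiteral; both are replaced by a concrete type
// during type inference.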
 

	
 
/// SymbolicParserType is the specification of a symbolic type. During the
 
/// parsing phase we will only store the identifier of the type. During the
 
/// validation phase we will determine whether it refers to a user-defined type,
 
/// or a polymorphic argument. After the validation phase it may still be the
 
/// case that the resulting `variant` will not pass the typechecker.
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct SymbolicParserType {
 
    // Phase 1: parser
 
    pub identifier: NamespacedIdentifier,
 
    /// The user-specified polymorphic arguments. Zero-length implies that the
 
    /// user did not specify any of them, and they're either not needed or all
 
    /// need to be inferred. Otherwise the number of polymorphic arguments must
 
    /// match those of the corresponding definition
 
    pub poly_args: Vec<ParserTypeId>,
 
    // Phase 2: validation/linking (for types in function/component bodies) and
 
    //  type table construction (for embedded types of structs/unions)
 
    pub variant: Option<SymbolicParserTypeVariant>
 
}
 

	
 
/// Specifies whether the symbolic type points to an actual user-defined type,
 
/// or whether it points to a polymorphic argument within the definition (e.g.
 
/// a variable `T var` declared within a function `int func<T>()`).
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum SymbolicParserTypeVariant {
 
    Definition(DefinitionId),
 
    // TODO: figure out if I need the DefinitionId here
 
    PolyArg(DefinitionId, usize), // index of polyarg in the definition
 
}
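// Example (illustrative, names hypothetical): for a function declared as
// `int func<T>(T value)`, the parser type of `value` resolves to
// PolyArg(func_definition_id, 0), whereas a parameter whose type names a
// user-defined struct resolves to Definition(struct_definition_id).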
 

	
 
/// ConcreteType is the representation of a type after resolving symbolic types
 
/// and performing type inference
 
-#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
 
pub enum ConcreteTypePart {
 
    // Markers for the use of polymorphic types within a procedure's body that
 
    // refer to polymorphic variables on the procedure's definition. Different
 
    // from markers in the `InferenceType`, these will not contain nested types.
 
    Marker(usize),
 
    // Special types (cannot be explicitly constructed by the programmer)
 
    Void,
 
    // Builtin types without nested types
 
    Message,
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // Builtin types with one nested type
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // User defined type with any number of nested types
 
    Instance(DefinitionId, usize),
 
}
 

	
 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
 
pub struct ConcreteType {
 
    pub(crate) parts: Vec<ConcreteTypePart>
 
}
 

	
 
impl Default for ConcreteType {
 
    fn default() -> Self {
 
        Self{ parts: Vec::new() }
 
    }
 
}
 

	
 
impl ConcreteType {
 
    pub(crate) fn has_marker(&self) -> bool {
 
        self.parts
 
            .iter()
 
            .any(|p| {
 
                if let ConcreteTypePart::Marker(_) = p { true } else { false }
 
            })
 
    }
 
}
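// Illustrative sketch (not part of this changeset, assumed to sit next to the
// definitions above): `parts` encodes a type tree in pre-order, with each part
// followed by the parts of its nested types. For example `in<byte[]>` becomes
// [Input, Array, Byte].
#[test]
fn concrete_type_parts_example() {
    use ConcreteTypePart as CTP;

    // in<byte[]>: Input wraps one nested type (the array), Array wraps Byte.
    let in_byte_array = ConcreteType{ parts: vec![CTP::Input, CTP::Array, CTP::Byte] };
    assert!(!in_byte_array.has_marker());

    // A lone marker referring to the first polymorphic variable of a procedure.
    let poly = ConcreteType{ parts: vec![CTP::Marker(0)] };
    assert!(poly.has_marker());
}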
 

	
 
// TODO: Remove at some point
 
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 
pub enum PrimitiveType {
 
    Unassigned,
 
    Input,
 
    Output,
 
    Message,
 
    Boolean,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    Symbolic(PrimitiveSymbolic)
 
}
 

	
 
// TODO: @cleanup, remove PartialEq implementations
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct PrimitiveSymbolic {
 
    // Phase 1: parser
 
    pub(crate) identifier: NamespacedIdentifier,
 
    // Phase 2: typing
 
    pub(crate) definition: Option<DefinitionId>
 
}
 

	
 
impl PartialEq for PrimitiveSymbolic {
 
    fn eq(&self, other: &Self) -> bool {
 
        self.identifier == other.identifier
 
    }
 
}
 
impl Eq for PrimitiveSymbolic{}
 

	
 
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 
pub struct Type {
 
    pub primitive: PrimitiveType,
 
    pub array: bool,
 
}
 

	
 
#[allow(dead_code)]
 
impl Type {
 
    pub const UNASSIGNED: Type = Type { primitive: PrimitiveType::Unassigned, array: false };
 

	
 
    pub const INPUT: Type = Type { primitive: PrimitiveType::Input, array: false };
 
    pub const OUTPUT: Type = Type { primitive: PrimitiveType::Output, array: false };
 
    pub const MESSAGE: Type = Type { primitive: PrimitiveType::Message, array: false };
 
    pub const BOOLEAN: Type = Type { primitive: PrimitiveType::Boolean, array: false };
 
    pub const BYTE: Type = Type { primitive: PrimitiveType::Byte, array: false };
 
    pub const SHORT: Type = Type { primitive: PrimitiveType::Short, array: false };
 
    pub const INT: Type = Type { primitive: PrimitiveType::Int, array: false };
 
    pub const LONG: Type = Type { primitive: PrimitiveType::Long, array: false };
 

	
 
    pub const INPUT_ARRAY: Type = Type { primitive: PrimitiveType::Input, array: true };
 
    pub const OUTPUT_ARRAY: Type = Type { primitive: PrimitiveType::Output, array: true };
 
    pub const MESSAGE_ARRAY: Type = Type { primitive: PrimitiveType::Message, array: true };
 
    pub const BOOLEAN_ARRAY: Type = Type { primitive: PrimitiveType::Boolean, array: true };
 
    pub const BYTE_ARRAY: Type = Type { primitive: PrimitiveType::Byte, array: true };
 
    pub const SHORT_ARRAY: Type = Type { primitive: PrimitiveType::Short, array: true };
 
    pub const INT_ARRAY: Type = Type { primitive: PrimitiveType::Int, array: true };
 
    pub const LONG_ARRAY: Type = Type { primitive: PrimitiveType::Long, array: true };
 
}
 

	
 
impl Display for Type {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        match &self.primitive {
 
            PrimitiveType::Unassigned => {
 
                write!(f, "unassigned")?;
 
            }
 
            PrimitiveType::Input => {
 
                write!(f, "in")?;
 
            }
 
            PrimitiveType::Output => {
 
                write!(f, "out")?;
 
            }
 
            PrimitiveType::Message => {
 
                write!(f, "msg")?;
 
            }
 
            PrimitiveType::Boolean => {
 
                write!(f, "boolean")?;
 
            }
 
            PrimitiveType::Byte => {
 
                write!(f, "byte")?;
 
            }
 
            PrimitiveType::Short => {
 
                write!(f, "short")?;
 
            }
 
            PrimitiveType::Int => {
 
                write!(f, "int")?;
 
            }
 
            PrimitiveType::Long => {
 
                write!(f, "long")?;
 
            }
 
            PrimitiveType::Symbolic(data) => {
 
                // Type data is in ASCII range.
 
                if let Some(id) = &data.definition {
 
                    write!(
 
                        f, "Symbolic({}, id: {})", 
 
                        String::from_utf8_lossy(&data.identifier.value),
src/protocol/ast_printer.rs
 
use std::fmt::{Debug, Display, Write};
 
use std::io::Write as IOWrite;
 

	
 
use super::ast::*;
 

	
 
const INDENT: usize = 2;
 

	
 
const PREFIX_EMPTY: &'static str = "    ";
 
const PREFIX_ROOT_ID: &'static str = "Root";
 
const PREFIX_PRAGMA_ID: &'static str = "Prag";
 
const PREFIX_IMPORT_ID: &'static str = "Imp ";
 
const PREFIX_TYPE_ANNOT_ID: &'static str = "TyAn";
 
const PREFIX_VARIABLE_ID: &'static str = "Var ";
 
const PREFIX_PARAMETER_ID: &'static str = "Par ";
 
const PREFIX_LOCAL_ID: &'static str = "Loc ";
 
const PREFIX_DEFINITION_ID: &'static str = "Def ";
 
const PREFIX_STRUCT_ID: &'static str = "DefS";
 
const PREFIX_ENUM_ID: &'static str = "DefE";
 
const PREFIX_COMPONENT_ID: &'static str = "DefC";
 
const PREFIX_FUNCTION_ID: &'static str = "DefF";
 
const PREFIX_STMT_ID: &'static str = "Stmt";
 
const PREFIX_BLOCK_STMT_ID: &'static str = "SBl ";
 
const PREFIX_LOCAL_STMT_ID: &'static str = "SLoc";
 
const PREFIX_MEM_STMT_ID: &'static str = "SMem";
 
const PREFIX_CHANNEL_STMT_ID: &'static str = "SCha";
 
const PREFIX_SKIP_STMT_ID: &'static str = "SSki";
 
const PREFIX_LABELED_STMT_ID: &'static str = "SLab";
 
const PREFIX_IF_STMT_ID: &'static str = "SIf ";
 
const PREFIX_ENDIF_STMT_ID: &'static str = "SEIf";
 
const PREFIX_WHILE_STMT_ID: &'static str = "SWhi";
 
const PREFIX_ENDWHILE_STMT_ID: &'static str = "SEWh";
 
const PREFIX_BREAK_STMT_ID: &'static str = "SBre";
 
const PREFIX_CONTINUE_STMT_ID: &'static str = "SCon";
 
const PREFIX_SYNC_STMT_ID: &'static str = "SSyn";
 
const PREFIX_ENDSYNC_STMT_ID: &'static str = "SESy";
 
const PREFIX_RETURN_STMT_ID: &'static str = "SRet";
 
const PREFIX_ASSERT_STMT_ID: &'static str = "SAsr";
 
const PREFIX_GOTO_STMT_ID: &'static str = "SGot";
 
const PREFIX_NEW_STMT_ID: &'static str = "SNew";
 
const PREFIX_PUT_STMT_ID: &'static str = "SPut";
 
const PREFIX_EXPR_STMT_ID: &'static str = "SExp";
 
const PREFIX_ASSIGNMENT_EXPR_ID: &'static str = "EAsi";
 
const PREFIX_CONDITIONAL_EXPR_ID: &'static str = "ECnd";
 
const PREFIX_BINARY_EXPR_ID: &'static str = "EBin";
 
const PREFIX_UNARY_EXPR_ID: &'static str = "EUna";
 
const PREFIX_INDEXING_EXPR_ID: &'static str = "EIdx";
 
const PREFIX_SLICING_EXPR_ID: &'static str = "ESli";
 
const PREFIX_SELECT_EXPR_ID: &'static str = "ESel";
 
const PREFIX_ARRAY_EXPR_ID: &'static str = "EArr";
 
const PREFIX_CONST_EXPR_ID: &'static str = "ECns";
 
const PREFIX_CALL_EXPR_ID: &'static str = "ECll";
 
const PREFIX_VARIABLE_EXPR_ID: &'static str = "EVar";
 

	
 
struct KV<'a> {
 
    buffer: &'a mut String,
 
    prefix: Option<(&'static str, u32)>,
 
    indent: usize,
 
    temp_key: &'a mut String,
 
    temp_val: &'a mut String,
 
}
 

	
 
impl<'a> KV<'a> {
 
    fn new(buffer: &'a mut String, temp_key: &'a mut String, temp_val: &'a mut String, indent: usize) -> Self {
 
        temp_key.clear();
 
        temp_val.clear();
 
        KV{
 
            buffer,
 
            prefix: None,
 
            indent,
 
            temp_key,
 
            temp_val
 
        }
 
    }
 

	
 
    fn with_id(mut self, prefix: &'static str, id: u32) -> Self {
 
        self.prefix = Some((prefix, id));
 
        self
 
    }
 

	
 
    fn with_s_key(self, key: &str) -> Self {
 
        self.temp_key.push_str(key);
 
        self
 
    }
 

	
 
-    fn with_d_key<D: Display>(mut self, key: &D) -> Self {
+    fn with_d_key<D: Display>(self, key: &D) -> Self {
 
        self.temp_key.push_str(&key.to_string());
 
        self
 
    }
 

	
 
    fn with_s_val(self, val: &str) -> Self {
 
        self.temp_val.push_str(val);
 
        self
 
    }
 

	
 
-    fn with_disp_val<D: Display>(mut self, val: &D) -> Self {
+    fn with_disp_val<D: Display>(self, val: &D) -> Self {
 
        self.temp_val.push_str(&format!("{}", val));
 
        self
 
    }
 

	
 
-    fn with_debug_val<D: Debug>(mut self, val: &D) -> Self {
+    fn with_debug_val<D: Debug>(self, val: &D) -> Self {
 
        self.temp_val.push_str(&format!("{:?}", val));
 
        self
 
    }
 

	
 
    fn with_ascii_val(self, val: &[u8]) -> Self {
 
        self.temp_val.push_str(&*String::from_utf8_lossy(val));
 
        self
 
    }
 

	
 
-    fn with_opt_disp_val<D: Display>(mut self, val: Option<&D>) -> Self {
+    fn with_opt_disp_val<D: Display>(self, val: Option<&D>) -> Self {
 
        match val {
 
            Some(v) => { self.temp_val.push_str(&format!("Some({})", v)); },
 
            None => { self.temp_val.push_str("None"); }
 
        }
 
        self
 
    }
 

	
 
    fn with_opt_ascii_val(self, val: Option<&[u8]>) -> Self {
 
        match val {
 
            Some(v) => {
 
                self.temp_val.push_str("Some(");
 
                self.temp_val.push_str(&*String::from_utf8_lossy(v));
 
                self.temp_val.push(')');
 
            },
 
            None => {
 
                self.temp_val.push_str("None");
 
            }
 
        }
 
        self
 
    }
 

	
 
    fn with_custom_val<F: Fn(&mut String)>(mut self, val_fn: F) -> Self {
 
        val_fn(&mut self.temp_val);
 
        self
 
    }
 
}
 

	
 
impl<'a> Drop for KV<'a> {
 
    fn drop(&mut self) {
 
        // Prefix and indent
 
        if let Some((prefix, id)) = &self.prefix {
 
            self.buffer.push_str(&format!("{}[{:04}]", prefix, id));
 
        } else {
 
            self.buffer.push_str("           ");
 
        }
 

	
 
        for _ in 0..self.indent * INDENT {
 
            self.buffer.push(' ');
 
        }
 

	
 
        // Leading dash
 
        self.buffer.push_str("- ");
 

	
 
        // Key and value
 
        self.buffer.push_str(self.temp_key);
 
        if self.temp_val.is_empty() {
 
            self.buffer.push(':');
 
        } else {
 
            self.buffer.push_str(": ");
 
            self.buffer.push_str(&self.temp_val);
 
        }
 
        self.buffer.push('\n');
 
    }
 
}
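// Illustrative sketch (not part of this changeset, assumes the INDENT constant
// above is in scope): each dropped KV becomes one line of the form
// "<prefix>[<id>]<indent>- <key>: <value>". A standalone approximation:
#[cfg(test)]
fn format_kv_line(prefix: Option<(&str, u32)>, indent: usize, key: &str, val: &str) -> String {
    let mut line = match prefix {
        Some((p, id)) => format!("{}[{:04}]", p, id),
        None => " ".repeat(11), // filler used when the entry has no id prefix
    };
    line.push_str(&" ".repeat(indent * INDENT));
    line.push_str("- ");
    line.push_str(key);
    if val.is_empty() {
        line.push(':');
    } else {
        line.push_str(": ");
        line.push_str(val);
    }
    line
}

#[test]
fn kv_line_format_example() {
    assert_eq!(
        format_kv_line(Some(("Stmt", 7)), 1, "Next", "None"),
        "Stmt[0007]  - Next: None"
    );
}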
 

	
 
pub(crate) struct ASTWriter {
 
    cur_definition: Option<DefinitionId>,
 
    buffer: String,
 
    temp1: String,
 
    temp2: String,
 
}
 

	
 
impl ASTWriter {
 
    pub(crate) fn new() -> Self {
 
        Self{
 
            cur_definition: None,
 
            buffer: String::with_capacity(4096),
 
            temp1: String::with_capacity(256),
 
            temp2: String::with_capacity(256),
 
        }
 
    }
 
    pub(crate) fn write_ast<W: IOWrite>(&mut self, w: &mut W, heap: &Heap) {
 
        for root_id in heap.protocol_descriptions.iter().map(|v| v.this) {
 
            self.write_module(heap, root_id);
 
            w.write_all(self.buffer.as_bytes()).expect("flush buffer");
 
            self.buffer.clear();
 
        }
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Top-level module writing
 
    //--------------------------------------------------------------------------
 

	
 
    fn write_module(&mut self, heap: &Heap, root_id: RootId) {
 
        self.kv(0).with_id(PREFIX_ROOT_ID, root_id.index)
 
            .with_s_key("Module");
 

	
 
        let root = &heap[root_id];
 
        self.kv(1).with_s_key("Pragmas");
 
        for pragma_id in &root.pragmas {
 
            self.write_pragma(heap, *pragma_id, 2);
 
        }
 

	
 
        self.kv(1).with_s_key("Imports");
 
        for import_id in &root.imports {
 
            self.write_import(heap, *import_id, 2);
 
        }
 

	
 
        self.kv(1).with_s_key("Definitions");
 
        for def_id in &root.definitions {
 
            self.write_definition(heap, *def_id, 2);
 
        }
 
    }
 

	
 
    fn write_pragma(&mut self, heap: &Heap, pragma_id: PragmaId, indent: usize) {
 
        match &heap[pragma_id] {
 
            Pragma::Version(pragma) => {
 
                self.kv(indent).with_id(PREFIX_PRAGMA_ID, pragma.this.index)
 
                    .with_s_key("PragmaVersion")
 
                    .with_disp_val(&pragma.version);
 
            },
 
            Pragma::Module(pragma) => {
 
                self.kv(indent).with_id(PREFIX_PRAGMA_ID, pragma.this.index)
 
                    .with_s_key("PragmaModule")
 
                    .with_ascii_val(&pragma.value);
 
            }
 
        }
 
    }
 

	
 
    fn write_import(&mut self, heap: &Heap, import_id: ImportId, indent: usize) {
 
        let import = &heap[import_id];
 
        let indent2 = indent + 1;
 

	
 
        match import {
 
            Import::Module(import) => {
 
                self.kv(indent).with_id(PREFIX_IMPORT_ID, import.this.index)
 
                    .with_s_key("ImportModule");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&import.module_name);
 
                self.kv(indent2).with_s_key("Alias").with_ascii_val(&import.alias);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(import.module_id.as_ref().map(|v| &v.index));
 
            },
 
            Import::Symbols(import) => {
 
                self.kv(indent).with_id(PREFIX_IMPORT_ID, import.this.index)
 
                    .with_s_key("ImportSymbol");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&import.module_name);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(import.module_id.as_ref().map(|v| &v.index));
 

	
 
                self.kv(indent2).with_s_key("Symbols");
 

	
 
                let indent3 = indent2 + 1;
 
                let indent4 = indent3 + 1;
 
                for symbol in &import.symbols {
 
                    self.kv(indent3).with_s_key("AliasedSymbol");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&symbol.name);
 
                    self.kv(indent4).with_s_key("Alias").with_ascii_val(&symbol.alias);
 
                    self.kv(indent4).with_s_key("Definition")
 
                        .with_opt_disp_val(symbol.definition_id.as_ref().map(|v| &v.index));
 
                }
 
            }
 
        }
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Top-level definition writing
 
    //--------------------------------------------------------------------------
 

	
 
    fn write_definition(&mut self, heap: &Heap, def_id: DefinitionId, indent: usize) {
 
        self.cur_definition = Some(def_id);
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 
        let indent4 = indent3 + 1;
 

	
 
        match &heap[def_id] {
 
            Definition::Struct(_) => todo!("implement Definition::Struct"),
 
            Definition::Enum(_) => todo!("implement Definition::Enum"),
 
            Definition::Function(def) => {
 
                self.kv(indent).with_id(PREFIX_FUNCTION_ID, def.this.0.index)
 
                    .with_s_key("DefinitionFunction");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&def.identifier.value);
 
                for poly_var_id in &def.poly_vars {
 
                    self.kv(indent3).with_s_key("PolyVar");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&poly_var_id.value);
 
                }
 

	
 
                self.kv(indent2).with_s_key("ReturnParserType").with_custom_val(|s| write_parser_type(s, heap, &heap[def.return_type]));
 

	
 
                self.kv(indent2).with_s_key("Parameters");
 
                for param_id in &def.parameters {
 
                    self.write_parameter(heap, *param_id, indent3);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, def.body, indent3);
 
            },
 
            Definition::Component(def) => {
 
                self.kv(indent).with_id(PREFIX_COMPONENT_ID,def.this.0.index)
 
                    .with_s_key("DefinitionComponent");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&def.identifier.value);
 
                self.kv(indent2).with_s_key("Variant").with_debug_val(&def.variant);
 

	
 
                self.kv(indent2).with_s_key("PolymorphicVariables");
 
                for poly_var_id in &def.poly_vars {
 
                    self.kv(indent3).with_s_key("PolyVar");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&poly_var_id.value);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parameters");
 
                for param_id in &def.parameters {
 
                    self.write_parameter(heap, *param_id, indent3)
 
                }
 

	
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, def.body, indent3);
 
            }
 
        }
 
    }
 

	
 
    fn write_parameter(&mut self, heap: &Heap, param_id: ParameterId, indent: usize) {
 
        let indent2 = indent + 1;
 
        let param = &heap[param_id];
 

	
 
        self.kv(indent).with_id(PREFIX_PARAMETER_ID, param_id.0.index)
 
            .with_s_key("Parameter");
 
        self.kv(indent2).with_s_key("Name").with_ascii_val(&param.identifier.value);
 
        self.kv(indent2).with_s_key("ParserType").with_custom_val(|w| write_parser_type(w, heap, &heap[param.parser_type]));
 
    }
 

	
 
    fn write_stmt(&mut self, heap: &Heap, stmt_id: StatementId, indent: usize) {
 
        let stmt = &heap[stmt_id];
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 

	
 
        match stmt {
 
            Statement::Block(stmt) => {
 
                self.kv(indent).with_id(PREFIX_BLOCK_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Block");
 

	
 
                for stmt_id in &stmt.statements {
 
                    self.write_stmt(heap, *stmt_id, indent2);
 
                }
 
            },
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Channel(stmt) => {
 
                        self.kv(indent).with_id(PREFIX_CHANNEL_STMT_ID, stmt.this.0.0.index)
 
                            .with_s_key("LocalChannel");
 

	
 
                        self.kv(indent2).with_s_key("From");
 
                        self.write_local(heap, stmt.from, indent3);
 
                        self.kv(indent2).with_s_key("To");
 
                        self.write_local(heap, stmt.to, indent3);
 
                        self.kv(indent2).with_s_key("Next")
 
                            .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
                    },
 
                    LocalStatement::Memory(stmt) => {
 
                        self.kv(indent).with_id(PREFIX_MEM_STMT_ID, stmt.this.0.0.index)
 
                            .with_s_key("LocalMemory");
 

	
 
                        self.kv(indent2).with_s_key("Variable");
 
                        self.write_local(heap, stmt.variable, indent3);
 
                        self.kv(indent2).with_s_key("Next")
 
                            .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
@@ -404,418 +407,429 @@ impl ASTWriter {
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::While(stmt) => {
 
                self.kv(indent).with_id(PREFIX_WHILE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("While");
 

	
 
                self.kv(indent2).with_s_key("EndWhile")
 
                    .with_opt_disp_val(stmt.end_while.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("InSync")
 
                    .with_opt_disp_val(stmt.in_sync.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("Condition");
 
                self.write_expr(heap, stmt.test, indent3);
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, stmt.body, indent3);
 
            },
 
            Statement::EndWhile(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ENDWHILE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("EndWhile");
 
                self.kv(indent2).with_s_key("StartWhile").with_disp_val(&stmt.start_while.0.index);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Break(stmt) => {
 
                self.kv(indent).with_id(PREFIX_BREAK_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Break");
 
                self.kv(indent2).with_s_key("Label")
 
                    .with_opt_ascii_val(stmt.label.as_ref().map(|v| v.value.as_slice()));
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::Continue(stmt) => {
 
                self.kv(indent).with_id(PREFIX_CONTINUE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Continue");
 
                self.kv(indent2).with_s_key("Label")
 
                    .with_opt_ascii_val(stmt.label.as_ref().map(|v| v.value.as_slice()));
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::Synchronous(stmt) => {
 
                self.kv(indent).with_id(PREFIX_SYNC_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Synchronous");
 
                self.kv(indent2).with_s_key("EndSync")
 
                    .with_opt_disp_val(stmt.end_sync.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, stmt.body, indent3);
 
            },
 
            Statement::EndSynchronous(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ENDSYNC_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("EndSynchronous");
 
                self.kv(indent2).with_s_key("StartSync").with_disp_val(&stmt.start_sync.0.index);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Return(stmt) => {
 
                self.kv(indent).with_id(PREFIX_RETURN_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Return");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression, indent3);
 
            },
 
            Statement::Assert(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ASSERT_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Assert");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression, indent3);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Goto(stmt) => {
 
                self.kv(indent).with_id(PREFIX_GOTO_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Goto");
 
                self.kv(indent2).with_s_key("Label").with_ascii_val(&stmt.label.value);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::New(stmt) => {
 
                self.kv(indent).with_id(PREFIX_NEW_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("New");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression.upcast(), indent3);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Expression(stmt) => {
 
                self.kv(indent).with_id(PREFIX_EXPR_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("ExpressionStatement");
 
                self.write_expr(heap, stmt.expression, indent2);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            }
 
        }
 
    }
 

	
 
    fn write_expr(&mut self, heap: &Heap, expr_id: ExpressionId, indent: usize) {
 
        let expr = &heap[expr_id];
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 
        let def_id = self.cur_definition.unwrap();
 

	
 
        match expr {
 
            Expression::Assignment(expr) => {
 
                self.kv(indent).with_id(PREFIX_ASSIGNMENT_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("AssignmentExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Left");
 
                self.write_expr(heap, expr.left, indent3);
 
                self.kv(indent2).with_s_key("Right");
 
                self.write_expr(heap, expr.right, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Conditional(expr) => {
 
                self.kv(indent).with_id(PREFIX_CONDITIONAL_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ConditionalExpr");
 
                self.kv(indent2).with_s_key("Condition");
 
                self.write_expr(heap, expr.test, indent3);
 
                self.kv(indent2).with_s_key("TrueExpression");
 
                self.write_expr(heap, expr.true_expression, indent3);
 
                self.kv(indent2).with_s_key("FalseExpression");
 
                self.write_expr(heap, expr.false_expression, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Binary(expr) => {
 
                self.kv(indent).with_id(PREFIX_BINARY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("BinaryExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Left");
 
                self.write_expr(heap, expr.left, indent3);
 
                self.kv(indent2).with_s_key("Right");
 
                self.write_expr(heap, expr.right, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Unary(expr) => {
 
                self.kv(indent).with_id(PREFIX_UNARY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("UnaryExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Argument");
 
                self.write_expr(heap, expr.expression, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Indexing(expr) => {
 
                self.kv(indent).with_id(PREFIX_INDEXING_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("IndexingExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 
                self.kv(indent2).with_s_key("Index");
 
                self.write_expr(heap, expr.index, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Slicing(expr) => {
 
                self.kv(indent).with_id(PREFIX_SLICING_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("SlicingExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 
                self.kv(indent2).with_s_key("FromIndex");
 
                self.write_expr(heap, expr.from_index, indent3);
 
                self.kv(indent2).with_s_key("ToIndex");
 
                self.write_expr(heap, expr.to_index, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Select(expr) => {
 
                self.kv(indent).with_id(PREFIX_SELECT_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("SelectExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 

	
 
                match &expr.field {
 
                    Field::Length => {
 
                        self.kv(indent2).with_s_key("Field").with_s_val("length");
 
                    },
 
                    Field::Symbolic(field) => {
 
                        self.kv(indent2).with_s_key("Field").with_ascii_val(&field.value);
 
                    }
 
                }
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Array(expr) => {
 
                self.kv(indent).with_id(PREFIX_ARRAY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ArrayExpr");
 
                self.kv(indent2).with_s_key("Elements");
 
                for expr_id in &expr.elements {
 
                    self.write_expr(heap, *expr_id, indent3);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Constant(expr) => {
 
                self.kv(indent).with_id(PREFIX_CONST_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ConstantExpr");
 

	
 
                let val = self.kv(indent2).with_s_key("Value");
 
                match &expr.value {
 
                    Constant::Null => { val.with_s_val("null"); },
 
                    Constant::True => { val.with_s_val("true"); },
 
                    Constant::False => { val.with_s_val("false"); },
 
                    Constant::Character(char) => { val.with_ascii_val(char); },
 
                    Constant::Integer(int) => { val.with_disp_val(int); },
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Call(expr) => {
 
                self.kv(indent).with_id(PREFIX_CALL_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("CallExpr");
 

	
 
                // Method
 
                let method = self.kv(indent2).with_s_key("Method");
 
                match &expr.method {
 
                    Method::Get => { method.with_s_val("get"); },
 
                    Method::Put => { method.with_s_val("put"); },
 
                    Method::Fires => { method.with_s_val("fires"); },
 
                    Method::Create => { method.with_s_val("create"); },
 
                    Method::Symbolic(symbolic) => {
 
                        method.with_s_val("symbolic");
 
                        self.kv(indent3).with_s_key("Name").with_ascii_val(&symbolic.identifier.value);
 
                        self.kv(indent3).with_s_key("Definition")
 
                            .with_opt_disp_val(symbolic.definition.as_ref().map(|v| &v.index));
 
                    }
 
                }
 

	
 
                // Arguments
 
                self.kv(indent2).with_s_key("Arguments");
 
                for arg_id in &expr.arguments {
 
                    self.write_expr(heap, *arg_id, indent3);
 
                }
 

	
 
                // Parent
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Variable(expr) => {
 
                self.kv(indent).with_id(PREFIX_VARIABLE_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("VariableExpr");
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&expr.identifier.value);
 
                self.kv(indent2).with_s_key("Definition")
 
                    .with_opt_disp_val(expr.declaration.as_ref().map(|v| &v.index));
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            }
 
        }
 
    }
 

	
 
    fn write_local(&mut self, heap: &Heap, local_id: LocalId, indent: usize) {
 
        let local = &heap[local_id];
 
        let indent2 = indent + 1;
 

	
 
        self.kv(indent).with_id(PREFIX_LOCAL_ID, local_id.0.index)
 
            .with_s_key("Local");
 

	
 
        self.kv(indent2).with_s_key("Name").with_ascii_val(&local.identifier.value);
 
        self.kv(indent2).with_s_key("ParserType")
 
            .with_custom_val(|w| write_parser_type(w, heap, &heap[local.parser_type]));
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Printing Utilities
 
    //--------------------------------------------------------------------------
 

	
 
    fn kv(&mut self, indent: usize) -> KV {
 
        KV::new(&mut self.buffer, &mut self.temp1, &mut self.temp2, indent)
 
    }
 

	
 
    fn flush<W: IOWrite>(&mut self, w: &mut W) {
 
        w.write(self.buffer.as_bytes()).unwrap();
 
        self.buffer.clear()
 
    }
 
}
 

	
 
fn write_option<V: Display>(target: &mut String, value: Option<V>) {
 
    target.clear();
 
    match &value {
 
        Some(v) => target.push_str(&format!("Some({})", v)),
 
        None => target.push_str("None")
 
    };
 
}
 

	
 
fn write_parser_type(target: &mut String, heap: &Heap, t: &ParserType) {
 
    use ParserTypeVariant as PTV;
 

	
 
    let mut embedded = Vec::new();
 
    match &t.variant {
 
        PTV::Input(id) => { target.push_str("in"); embedded.push(*id); }
 
        PTV::Output(id) => { target.push_str("out"); embedded.push(*id) }
 
        PTV::Array(id) => { target.push_str("array"); embedded.push(*id) }
 
        PTV::Message => { target.push_str("msg"); }
 
        PTV::Bool => { target.push_str("bool"); }
 
        PTV::Byte => { target.push_str("byte"); }
 
        PTV::Short => { target.push_str("short"); }
 
        PTV::Int => { target.push_str("int"); }
 
        PTV::Long => { target.push_str("long"); }
 
        PTV::String => { target.push_str("str"); }
 
        PTV::IntegerLiteral => { target.push_str("int_lit"); }
 
        PTV::Inferred => { target.push_str("auto"); }
 
        PTV::Symbolic(symbolic) => {
 
            target.push_str(&String::from_utf8_lossy(&symbolic.identifier.value));
 
            match symbolic.variant {
 
                Some(SymbolicParserTypeVariant::PolyArg(def_id, idx)) => {
 
                    target.push_str(&format!("{{def: {}, idx: {}}}", def_id.index, idx));
 
                },
 
                Some(SymbolicParserTypeVariant::Definition(def_id)) => {
 
                    target.push_str(&format!("{{def: {}}}", def_id.index));
 
                },
 
                None => {
 
                    target.push_str("{None}");
 
                }
 
            }
 
            embedded.extend(&symbolic.poly_args);
 
        }
 
    };
 

	
 
    if !embedded.is_empty() {
 
        target.push_str("<");
 
        for (idx, embedded_id) in embedded.into_iter().enumerate() {
 
            if idx != 0 { target.push_str(", "); }
 
            write_parser_type(target, heap, &heap[embedded_id]);
 
        }
 
        target.push_str(">");
 
    }
 
}
 

	
 
-fn write_concrete_type(target: &mut String, heap: &Heap, t: &ConcreteType) {
+fn write_concrete_type(target: &mut String, heap: &Heap, def_id: DefinitionId, t: &ConcreteType) {
 
    use ConcreteTypePart as CTP;
 

	
 
-    fn write_concrete_part(target: &mut String, heap: &Heap, t: &ConcreteType, mut idx: usize) -> usize {
+    fn write_concrete_part(target: &mut String, heap: &Heap, def_id: DefinitionId, t: &ConcreteType, mut idx: usize) -> usize {
 
        if idx >= t.parts.len() {
 
            target.push_str("Programmer error: invalid concrete type tree");
 
            return idx;
 
        }
 

	
 
        match &t.parts[idx] {
 
            CTP::Marker(marker) => {
 
                // Marker points to polymorphic variable index
 
                let definition = &heap[def_id];
 
                let poly_var_ident = match definition {
 
                    Definition::Struct(_) | Definition::Enum(_) => unreachable!(),
 
                    Definition::Function(definition) => &definition.poly_vars[*marker].value,
 
                    Definition::Component(definition) => &definition.poly_vars[*marker].value,
 
                };
 
                target.push_str(&String::from_utf8_lossy(&poly_var_ident));
 
                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
            },
 
            CTP::Void => target.push_str("void"),
 
            CTP::Message => target.push_str("msg"),
 
            CTP::Bool => target.push_str("bool"),
 
            CTP::Byte => target.push_str("byte"),
 
            CTP::Short => target.push_str("short"),
 
            CTP::Int => target.push_str("int"),
 
            CTP::Long => target.push_str("long"),
 
            CTP::String => target.push_str("string"),
 
            CTP::Array => {
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push_str("[]");
 
            },
 
            CTP::Slice => {
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push_str("[..]");
 
            }
 
            CTP::Input => {
 
                target.push_str("in<");
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push('>');
 
            },
 
            CTP::Output => {
 
                target.push_str("out<");
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push('>')
 
            },
 
            CTP::Instance(definition_id, num_embedded) => {
 
                let identifier = heap[*definition_id].identifier();
 
                target.push_str(&String::from_utf8_lossy(&identifier.value));
 
                target.push('<');
 
                for idx_embedded in 0..*num_embedded {
 
                    if idx_embedded != 0 {
 
                        target.push_str(", ");
 
                    }
 
-                    idx = write_concrete_part(target, heap, t, idx + 1);
+                    idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                }
 
                target.push('>');
 
            }
 
        }
 

	
 
        // Index of the last part consumed by this subtree; callers continue
        // reading at `idx + 1`.
        idx
 
    }
 

	
 
-    write_concrete_part(target, heap, t, 0);
+    write_concrete_part(target, heap, def_id, t, 0);
 
}
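// Worked example (illustrative, `Pair` hypothetical): for a definition
// `Pair<A, B>`, the parts [Instance(pair_def_id, 2), Byte, Int] are written in
// pre-order as "Pair<byte, int>", while a Marker(0) part prints the name of
// the definition's first polymorphic variable.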
 

	
 
fn write_expression_parent(target: &mut String, parent: &ExpressionParent) {
 
    use ExpressionParent as EP;
 

	
 
    *target = match parent {
 
        EP::None => String::from("None"),
 
        EP::If(id) => format!("IfStmt({})", id.0.index),
 
        EP::While(id) => format!("WhileStmt({})", id.0.index),
 
        EP::Return(id) => format!("ReturnStmt({})", id.0.index),
 
        EP::Assert(id) => format!("AssertStmt({})", id.0.index),
 
        EP::New(id) => format!("NewStmt({})", id.0.index),
 
        EP::ExpressionStmt(id) => format!("ExprStmt({})", id.0.index),
 
        EP::Expression(id, idx) => format!("Expr({}, {})", id.index, idx)
 
    };
 
}
 
\ No newline at end of file
src/protocol/eval.rs
 
@@ -815,345 +815,361 @@ impl From<&Value> for i64 {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i64::from(*b),
 
            Value::Short(ShortValue(s)) => i64::from(*s),
 
            Value::Int(IntValue(i)) => i64::from(*i),
 
            Value::Long(LongValue(l)) => *l,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 

	
 
impl ValueImpl for Value {
 
    fn exact_type(&self) -> Type {
 
        match self {
 
            Value::Unassigned => Type::UNASSIGNED,
 
            Value::Input(val) => val.exact_type(),
 
            Value::Output(val) => val.exact_type(),
 
            Value::Message(val) => val.exact_type(),
 
            Value::Boolean(val) => val.exact_type(),
 
            Value::Byte(val) => val.exact_type(),
 
            Value::Short(val) => val.exact_type(),
 
            Value::Int(val) => val.exact_type(),
 
            Value::Long(val) => val.exact_type(),
 
            Value::InputArray(val) => val.exact_type(),
 
            Value::OutputArray(val) => val.exact_type(),
 
            Value::MessageArray(val) => val.exact_type(),
 
            Value::BooleanArray(val) => val.exact_type(),
 
            Value::ByteArray(val) => val.exact_type(),
 
            Value::ShortArray(val) => val.exact_type(),
 
            Value::IntArray(val) => val.exact_type(),
 
            Value::LongArray(val) => val.exact_type(),
 
        }
 
    }
 
    fn is_type_compatible(&self, h: &Heap, t: &ParserType) -> bool {
 
        match self {
 
            Value::Unassigned => true,
 
            Value::Input(_) => InputValue::is_type_compatible_hack(h, t),
 
            Value::Output(_) => OutputValue::is_type_compatible_hack(h, t),
 
            Value::Message(_) => MessageValue::is_type_compatible_hack(h, t),
 
            Value::Boolean(_) => BooleanValue::is_type_compatible_hack(h, t),
 
            Value::Byte(_) => ByteValue::is_type_compatible_hack(h, t),
 
            Value::Short(_) => ShortValue::is_type_compatible_hack(h, t),
 
            Value::Int(_) => IntValue::is_type_compatible_hack(h, t),
 
            Value::Long(_) => LongValue::is_type_compatible_hack(h, t),
 
            Value::InputArray(_) => InputArrayValue::is_type_compatible_hack(h, t),
 
            Value::OutputArray(_) => OutputArrayValue::is_type_compatible_hack(h, t),
 
            Value::MessageArray(_) => MessageArrayValue::is_type_compatible_hack(h, t),
 
            Value::BooleanArray(_) => BooleanArrayValue::is_type_compatible_hack(h, t),
 
            Value::ByteArray(_) => ByteArrayValue::is_type_compatible_hack(h, t),
 
            Value::ShortArray(_) => ShortArrayValue::is_type_compatible_hack(h, t),
 
            Value::IntArray(_) => InputArrayValue::is_type_compatible_hack(h, t),
 
            Value::LongArray(_) => LongArrayValue::is_type_compatible_hack(h, t),
 
        }
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, _t: &ParserType) -> bool { false }
 
}
 

	
 
impl Display for Value {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        let disp: &dyn Display;
 
        match self {
 
            Value::Unassigned => disp = &Type::UNASSIGNED,
 
            Value::Input(val) => disp = val,
 
            Value::Output(val) => disp = val,
 
            Value::Message(val) => disp = val,
 
            Value::Boolean(val) => disp = val,
 
            Value::Byte(val) => disp = val,
 
            Value::Short(val) => disp = val,
 
            Value::Int(val) => disp = val,
 
            Value::Long(val) => disp = val,
 
            Value::InputArray(val) => disp = val,
 
            Value::OutputArray(val) => disp = val,
 
            Value::MessageArray(val) => disp = val,
 
            Value::BooleanArray(val) => disp = val,
 
            Value::ByteArray(val) => disp = val,
 
            Value::ShortArray(val) => disp = val,
 
            Value::IntArray(val) => disp = val,
 
            Value::LongArray(val) => disp = val,
 
        }
 
        disp.fmt(f)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct InputValue(pub PortId);
 

	
 
impl Display for InputValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "#in")
 
    }
 
}
 

	
 
impl ValueImpl for InputValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INPUT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
-        return if let ParserTypeVariant::Input(_) = t.variant { true } else { false }
+        use ParserTypeVariant::*;
+        match &t.variant {
+            Input(_) | Inferred | Symbolic(_) => true,
+            _ => false,
+        }
 
    }
 
}
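// Illustrative note (not part of this changeset): these is_type_compatible_hack
// impls now also accept Inferred and Symbolic parser types, since the parser
// type being checked may not yet be resolved to a concrete type at this stage.
// The integer impls additionally accept every wider integer type, e.g. a
// ByteValue is compatible with byte, short, int and long.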
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct OutputValue(pub PortId);
 

	
 
impl Display for OutputValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "#out")
 
    }
 
}
 

	
 
impl ValueImpl for OutputValue {
 
    fn exact_type(&self) -> Type {
 
        Type::OUTPUT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
-        return if let ParserTypeVariant::Output(_) = t.variant { true } else { false }
+        use ParserTypeVariant::*;
+        match &t.variant {
+            Output(_) | Inferred | Symbolic(_) => true,
+            _ => false,
+        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct MessageValue(pub Option<Payload>);
 

	
 
impl Display for MessageValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        match &self.0 {
 
            None => write!(f, "null"),
 
            Some(payload) => {
 
                // format print up to 10 bytes
 
                let mut slice = payload.as_slice();
 
                if slice.len() > 10 {
 
                    slice = &slice[..10];
 
                }
 
                f.debug_list().entries(slice.iter().copied()).finish()
 
            }
 
        }
 
    }
 
}
 

	
 
impl ValueImpl for MessageValue {
 
    fn exact_type(&self) -> Type {
 
        Type::MESSAGE
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Message | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct BooleanValue(bool);
 

	
 
impl Display for BooleanValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for BooleanValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BOOLEAN
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Symbolic(_) | Inferred | Bool | Byte | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ByteValue(i8);
 

	
 
impl Display for ByteValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for ByteValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BYTE
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Symbolic(_) | Inferred | Byte | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ShortValue(i16);
 

	
 
impl Display for ShortValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for ShortValue {
 
    fn exact_type(&self) -> Type {
 
        Type::SHORT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Symbolic(_) | Inferred | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct IntValue(i32);
 

	
 
impl Display for IntValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for IntValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Symbolic(_) | Inferred | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct LongValue(i64);
 

	
 
impl Display for LongValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for LongValue {
 
    fn exact_type(&self) -> Type {
 
        Type::LONG
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Long | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
fn get_array_inner(t: &ParserType) -> Option<ParserTypeId> {
 
    match t.variant {
 
        ParserTypeVariant::Array(inner) => Some(inner),
 
        _ => None
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct InputArrayValue(Vec<InputValue>);
 

	
 
impl Display for InputArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for InputArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INPUT_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| InputValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct OutputArrayValue(Vec<OutputValue>);
 

	
 
impl Display for OutputArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for OutputArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::OUTPUT_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| OutputValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct MessageArrayValue(Vec<MessageValue>);
 

	
 
impl Display for MessageArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for MessageArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::MESSAGE_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| MessageValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 
@@ -1422,199 +1438,196 @@ impl Store {
 
                    AssignmentOperator::Subtracted => {
 
                        let old = self.get(h, ctx, expr.left)?;
 
                        self.update(h, ctx, expr.left, old.minus(&value))?;
 
                    }
 
                    _ => unimplemented!("{:?}", expr),
 
                }
 
                Ok(value)
 
            }
 
            Expression::Conditional(expr) => {
 
                let test = self.eval(h, ctx, expr.test)?;
 
                if test.as_boolean().0 {
 
                    self.eval(h, ctx, expr.true_expression)
 
                } else {
 
                    self.eval(h, ctx, expr.false_expression)
 
                }
 
            }
 
            Expression::Binary(expr) => {
 
                let left = self.eval(h, ctx, expr.left)?;
 
                let right;
 
                match expr.operation {
 
                    BinaryOperator::LogicalAnd => {
 
                        if left.as_boolean().0 == false {
 
                            return Ok(left);
 
                        }
 
                        right = self.eval(h, ctx, expr.right)?;
 
                        right.as_boolean(); // panics if not a boolean
 
                        return Ok(right);
 
                    }
 
                    BinaryOperator::LogicalOr => {
 
                        if left.as_boolean().0 == true {
 
                            return Ok(left);
 
                        }
 
                        right = self.eval(h, ctx, expr.right)?;
 
                        right.as_boolean(); // panics if not a boolean
 
                        return Ok(right);
 
                    }
 
                    _ => {}
 
                }
 
                right = self.eval(h, ctx, expr.right)?;
 
                match expr.operation {
 
                    BinaryOperator::Equality => Ok(left.eq(&right)),
 
                    BinaryOperator::Inequality => Ok(left.neq(&right)),
 
                    BinaryOperator::LessThan => Ok(left.lt(&right)),
 
                    BinaryOperator::LessThanEqual => Ok(left.lte(&right)),
 
                    BinaryOperator::GreaterThan => Ok(left.gt(&right)),
 
                    BinaryOperator::GreaterThanEqual => Ok(left.gte(&right)),
 
                    BinaryOperator::Remainder => Ok(left.modulus(&right)),
 
                    BinaryOperator::Add => Ok(left.plus(&right)),
 
                    _ => unimplemented!("{:?}", expr.operation),
 
                }
 
            }
 
            Expression::Unary(expr) => {
 
                let mut value = self.eval(h, ctx, expr.expression)?;
 
                match expr.operation {
 
                    UnaryOperation::PostIncrement => {
 
                        self.update(h, ctx, expr.expression, value.plus(&ONE))?;
 
                    }
 
                    UnaryOperation::PreIncrement => {
 
                        value = value.plus(&ONE);
 
                        self.update(h, ctx, expr.expression, value.clone())?;
 
                    }
 
                    UnaryOperation::PostDecrement => {
 
                        self.update(h, ctx, expr.expression, value.minus(&ONE))?;
 
                    }
 
                    UnaryOperation::PreDecrement => {
 
                        value = value.minus(&ONE);
 
                        self.update(h, ctx, expr.expression, value.clone())?;
 
                    }
 
                    _ => unimplemented!(),
 
                }
 
                Ok(value)
 
            }
 
            Expression::Indexing(expr) => self.get(h, ctx, expr.this.upcast()),
 
            Expression::Slicing(_expr) => unimplemented!(),
 
            Expression::Select(expr) => self.get(h, ctx, expr.this.upcast()),
 
            Expression::Array(expr) => {
 
                let mut elements = Vec::new();
 
                for &elem in expr.elements.iter() {
 
                    elements.push(self.eval(h, ctx, elem)?);
 
                }
 
                todo!()
 
            }
 
            Expression::Constant(expr) => Ok(Value::from_constant(&expr.value)),
 
            Expression::Call(expr) => match &expr.method {
 
                Method::Get => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let value = self.eval(h, ctx, expr.arguments[0])?;
 
                    match ctx.get(value.clone()) {
 
                        None => Err(EvalContinuation::BlockGet(value)),
 
                        Some(result) => Ok(result),
 
                    }
 
                }
 
                Method::Put => {
 
                    assert_eq!(2, expr.arguments.len());
 
                    let port_value = self.eval(h, ctx, expr.arguments[0])?;
 
                    let msg_value = self.eval(h, ctx, expr.arguments[1])?;
 
                    println!("DEBUG: Handiling put({:?}, {:?})", port_value, msg_value);
 
                    if ctx.did_put(port_value.clone()) {
 
                        println!("DEBUG: Already put...");
 
                        // Return bogus, replacing this at some point anyway
 
                        Ok(Value::Message(MessageValue(None)))
 
                    } else {
 
                        println!("DEBUG: Did not yet put...");
 
                        Err(EvalContinuation::Put(port_value, msg_value))
 
                    }
 
                }
 
                Method::Fires => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let value = self.eval(h, ctx, expr.arguments[0])?;
 
                    match ctx.fires(value.clone()) {
 
                        None => Err(EvalContinuation::BlockFires(value)),
 
                        Some(result) => Ok(result),
 
                    }
 
                }
 
                Method::Create => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let length = self.eval(h, ctx, expr.arguments[0])?;
 
                    Ok(Value::create_message(length))
 
                }
 
                Method::Symbolic(_symbol) => unimplemented!(),
 
            },
 
            Expression::Variable(expr) => self.get(h, ctx, expr.this.upcast()),
 
        }
 
    }
 
}
 

	
 
type EvalResult = Result<Value, EvalContinuation>;
 
pub enum EvalContinuation {
 
    Stepping,
 
    Inconsistent,
 
    Terminal,
 
    SyncBlockStart,
 
    SyncBlockEnd,
 
    NewComponent(DefinitionId, Vec<Value>),
 
    BlockFires(Value),
 
    BlockGet(Value),
 
    Put(Value, Value),
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub(crate) struct Prompt {
 
    definition: DefinitionId,
 
    store: Store,
 
    position: Option<StatementId>,
 
}
 

	
 
impl Prompt {
 
    pub fn new(h: &Heap, def: DefinitionId, args: &Vec<Value>) -> Self {
 
        let mut prompt =
 
            Prompt { definition: def, store: Store::new(), position: Some((&h[def]).body()) };
 
        prompt.set_arguments(h, args);
 
        prompt
 
    }
 
    fn set_arguments(&mut self, h: &Heap, args: &Vec<Value>) {
 
        let def = &h[self.definition];
 
        let params = def.parameters();
 
        assert_eq!(params.len(), args.len());
 
        for (param, value) in params.iter().zip(args.iter()) {
 
            let hparam = &h[*param];
 
            let parser_type = &h[hparam.parser_type];
 
            assert!(value.is_type_compatible(h, parser_type));
 
            self.store.initialize(h, param.upcast(), value.clone());
 
        }
 
    }
 
    pub fn step(&mut self, h: &Heap, ctx: &mut EvalContext) -> EvalResult {
 
        if self.position.is_none() {
 
            return Err(EvalContinuation::Terminal);
 
        }
 

	
 
        let stmt = &h[self.position.unwrap()];
 
        match stmt {
 
            Statement::Block(stmt) => {
 
                // Continue to first statement
 
                self.position = Some(stmt.first());
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Memory(stmt) => {
 
                        // Update store
 
                        self.store.initialize(h, stmt.variable.upcast(), Value::Unassigned);
 
                    }
 
                    LocalStatement::Channel(stmt) => {
 
                        let [from, to] = ctx.new_channel();
 
                        // Store the values in the declared variables
 
                        self.store.initialize(h, stmt.from.upcast(), from);
 
                        self.store.initialize(h, stmt.to.upcast(), to);
 
                    }
 
                }
 
                // Continue to next statement
 
                self.position = stmt.next();
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Skip(stmt) => {
 
                // Continue to next statement
 
                self.position = stmt.next;
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Labeled(stmt) => {
src/protocol/lexer.rs
 
@@ -2362,447 +2362,96 @@ impl Lexer<'_> {
 

	
 
                    // A comma indicates that we may have another symbol coming
 
                    // up (not necessarily), but if not present then we expect the
 
                    // end of the symbol list
 
                    self.consume_whitespace(false)?;
 

	
 
                    next = self.source.next();
 
                    if let Some(b',') = next {
 
                        self.source.consume();
 
                        self.consume_whitespace(false)?;
 
                        next = self.source.next();
 
                    } else {
 
                        break;
 
                    }
 
                }
 

	
 
                if let Some(b'}') = next {
 
                    // We are fine, push the imported symbols
 
                    self.source.consume();
 
                    if symbols.is_empty() {
 
                        return Err(ParseError2::new_error(self.source, position, "empty symbol import list"));
 
                    }
 

	
 
                    h.alloc_import(|this| Import::Symbols(ImportSymbols{
 
                        this,
 
                        position,
 
                        module_name: value,
 
                        module_id: None,
 
                        symbols,
 
                    }))
 
                } else {
 
                    return Err(self.error_at_pos("Expected '}'"));
 
                }
 
            } else if let Some(b'*') = self.source.next() {
 
                // Import all symbols without alias
 
                self.source.consume();
 
                h.alloc_import(|this| Import::Symbols(ImportSymbols{
 
                    this,
 
                    position,
 
                    module_name: value,
 
                    module_id: None,
 
                    symbols: Vec::new()
 
                }))
 
            } else {
 
                return Err(self.error_at_pos("Expected '*' or '{'"));
 
            }
 
        } else {
 
            // No explicit alias or subimports, so implicit alias
 
            let alias = Vec::from(&value[last_ident_start..]);
 
            h.alloc_import(|this| Import::Module(ImportModule{
 
                this,
 
                position,
 
                module_name: value,
 
                alias,
 
                module_id: None,
 
            }))
 
        };
 

	
 
        self.consume_whitespace(false)?;
 
        self.consume_string(b";")?;
 
        Ok(import)
 
    }
 
    pub fn consume_protocol_description(&mut self, h: &mut Heap) -> Result<RootId, ParseError2> {
 
        let position = self.source.pos();
 
        let mut pragmas = Vec::new();
 
        let mut imports = Vec::new();
 
        let mut definitions = Vec::new();
 
        self.consume_whitespace(false)?;
 
        while self.has_pragma() {
 
            let pragma = self.consume_pragma(h)?;
 
            pragmas.push(pragma);
 
            self.consume_whitespace(false)?;
 
        }
 
        while self.has_import() {
 
            let import = self.consume_import(h)?;
 
            imports.push(import);
 
            self.consume_whitespace(false)?;
 
        }
 
        while self.has_symbol_definition() {
 
            let def = self.consume_symbol_definition(h)?;
 
            definitions.push(def);
 
            self.consume_whitespace(false)?;
 
        }
 
        // end of file
 
        if !self.source.is_eof() {
 
            return Err(self.error_at_pos("Expected end of file"));
 
        }
 
        Ok(h.alloc_protocol_description(|this| Root {
 
            this,
 
            position,
 
            pragmas,
 
            imports,
 
            definitions,
 
        }))
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use crate::protocol::ast::*;
 
    use crate::protocol::lexer::*;
 
    use crate::protocol::inputsource::*;
 

	
 
    #[derive(Debug, Eq, PartialEq)]
 
    enum ParserTypeClass {
 
        Message, Bool, Byte, Short, Int, Long, String, Array, Nope
 
    }
 
    impl ParserTypeClass {
 
        fn from(v: &ParserType) -> ParserTypeClass {
 
            use ParserTypeVariant as PTV;
 
            use ParserTypeClass as PTC;
 
            match &v.variant {
 
                PTV::Message => PTC::Message,
 
                PTV::Bool => PTC::Bool,
 
                PTV::Byte => PTC::Byte,
 
                PTV::Short => PTC::Short,
 
                PTV::Int => PTC::Int,
 
                PTV::Long => PTC::Long,
 
                PTV::String => PTC::String,
 
                PTV::Array(_) => PTC::Array,
 
                _ => PTC::Nope,
 
            }
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_pragmas() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        #version 0o7777
 
        #module something.dot.separated
 
        ").expect("new InputSource");
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h)
 
            .expect("lex input source");
 
        let root = &h[lexed];
 
        assert_eq!(root.pragmas.len(), 2);
 
        let pv = &h[root.pragmas[0]];
 
        let pm = &h[root.pragmas[1]];
 

	
 
        if let Pragma::Version(v) = pv {
 
            assert_eq!(v.version, 0o7777)
 
        } else {
 
            assert!(false, "first pragma not version");
 
        }
 
        if let Pragma::Module(m) = pm {
 
            assert_eq!(m.value, b"something.dot.separated");
 
        } else {
 
            assert!(false, "second pragma not module");
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_import() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        // Module imports, with optional and explicit aliasing
 
        import single_module;
 
        import std.reo;
 
        import something.other as alias;
 
        // Symbol imports
 
        import some_module::*;
 
        import some_module::{Foo as Bar, Qux, Dix as Flu};
 
        import std.reo::{
 
            Foo as Bar, // because thing
 
            Qux as Mox, // more explanations
 
            Dix, /* yesh, import me */
 
        };
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h).unwrap();
 
        let root = &h[lexed];
 
        assert_eq!(root.imports.len(), 6);
 
        let no_alias_single = h[root.imports[0]].as_module();
 
        let no_alias_multi = h[root.imports[1]].as_module();
 
        let with_alias = h[root.imports[2]].as_module();
 

	
 
        assert_eq!(no_alias_single.module_name, b"single_module");
 
        assert_eq!(no_alias_single.alias, b"single_module");
 
        assert_eq!(no_alias_multi.module_name, b"std.reo");
 
        assert_eq!(no_alias_multi.alias, b"reo");
 
        assert_eq!(with_alias.module_name, b"something.other");
 
        assert_eq!(with_alias.alias, b"alias");
 

	
 
        let all_symbols = h[root.imports[3]].as_symbols();
 
        let single_line_symbols = h[root.imports[4]].as_symbols();
 
        let multi_line_symbols = h[root.imports[5]].as_symbols();
 

	
 
        assert_eq!(all_symbols.module_name, b"some_module");
 
        assert!(all_symbols.symbols.is_empty());
 
        assert_eq!(single_line_symbols.module_name, b"some_module");
 
        assert_eq!(single_line_symbols.symbols.len(), 3);
 
        assert_eq!(single_line_symbols.symbols[0].name, b"Foo");
 
        assert_eq!(single_line_symbols.symbols[0].alias, b"Bar");
 
        assert_eq!(single_line_symbols.symbols[1].name, b"Qux");
 
        assert_eq!(single_line_symbols.symbols[1].alias, b"Qux");
 
        assert_eq!(single_line_symbols.symbols[2].name, b"Dix");
 
        assert_eq!(single_line_symbols.symbols[2].alias, b"Flu");
 
        assert_eq!(multi_line_symbols.module_name, b"std.reo");
 
        assert_eq!(multi_line_symbols.symbols.len(), 3);
 
        assert_eq!(multi_line_symbols.symbols[0].name, b"Foo");
 
        assert_eq!(multi_line_symbols.symbols[0].alias, b"Bar");
 
        assert_eq!(multi_line_symbols.symbols[1].name, b"Qux");
 
        assert_eq!(multi_line_symbols.symbols[1].alias, b"Mox");
 
        assert_eq!(multi_line_symbols.symbols[2].name, b"Dix");
 
        assert_eq!(multi_line_symbols.symbols[2].alias, b"Dix");
 
    }
 

	
 
    #[test]
 
    fn test_struct_definition() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        struct Foo {
 
            byte one,
 
            short two,
 
            Bar three,
 
        }
 
        struct Bar{int[] one, int[] two, Qux[] three}
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h);
 
        if let Err(err) = &lexed {
 
            println!("{}", err);
 
        }
 
        let lexed = lexed.unwrap();
 
        let root = &h[lexed];
 

	
 
        assert_eq!(root.definitions.len(), 2);
 

	
 
        // let symbolic_type = |v: &PrimitiveType| -> Vec<u8> {
 
        //     if let PrimitiveType::Symbolic(v) = v {
 
        //         v.identifier.value.clone()
 
        //     } else {
 
        //         assert!(false);
 
        //         unreachable!();
 
        //     }
 
        // };
 

	
 
        let foo_def = h[root.definitions[0]].as_struct();
 
        assert_eq!(foo_def.identifier.value, b"Foo");
 
        assert_eq!(foo_def.fields.len(), 3);
 
        assert_eq!(foo_def.fields[0].field.value, b"one");
 
        assert_eq!(ParserTypeClass::from(&h[foo_def.fields[0].parser_type]), ParserTypeClass::Byte);
 
        assert_eq!(foo_def.fields[1].field.value, b"two");
 
        assert_eq!(ParserTypeClass::from(&h[foo_def.fields[1].parser_type]), ParserTypeClass::Short);
 
        assert_eq!(foo_def.fields[2].field.value, b"three");
 
        // assert_eq!(
 
        //     symbolic_type(&h[foo_def.fields[2].the_type].the_type.primitive),
 
        //     Vec::from("Bar".as_bytes())
 
        // );
 

	
 
        let bar_def = h[root.definitions[1]].as_struct();
 
        assert_eq!(bar_def.identifier.value, b"Bar");
 
        assert_eq!(bar_def.fields.len(), 3);
 
        assert_eq!(bar_def.fields[0].field.value, b"one");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[0].parser_type]), ParserTypeClass::Array);
 
        assert_eq!(bar_def.fields[1].field.value, b"two");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[1].parser_type]), ParserTypeClass::Array);
 
        assert_eq!(bar_def.fields[2].field.value, b"three");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[2].parser_type]), ParserTypeClass::Array);
 
        // assert_eq!(
 
        //     symbolic_type(&h[bar_def.fields[2].parser_type].the_type.primitive),
 
        //     Vec::from("Qux".as_bytes())
 
        // );
 
    }
 

	
 
    #[test]
 
    fn test_enum_definition() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        enum Foo {
 
            A = 0,
 
            B = 5,
 
            C,
 
            D = 0xFF,
 
        }
 
        enum Bar { Ayoo, Byoo, Cyoo,}
 
        enum Qux { A(byte[]), B(Bar[]), C(byte)
 
        }
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h).unwrap();
 
        let root = &h[lexed];
 

	
 
        assert_eq!(root.definitions.len(), 3);
 

	
 
        let foo_def = h[root.definitions[0]].as_enum();
 
        assert_eq!(foo_def.identifier.value, b"Foo");
 
        assert_eq!(foo_def.variants.len(), 4);
 
        assert_eq!(foo_def.variants[0].identifier.value, b"A");
 
        assert_eq!(foo_def.variants[0].value, EnumVariantValue::Integer(0));
 
        assert_eq!(foo_def.variants[1].identifier.value, b"B");
 
        assert_eq!(foo_def.variants[1].value, EnumVariantValue::Integer(5));
 
        assert_eq!(foo_def.variants[2].identifier.value, b"C");
 
        assert_eq!(foo_def.variants[2].value, EnumVariantValue::None);
 
        assert_eq!(foo_def.variants[3].identifier.value, b"D");
 
        assert_eq!(foo_def.variants[3].value, EnumVariantValue::Integer(0xFF));
 

	
 
        let bar_def = h[root.definitions[1]].as_enum();
 
        assert_eq!(bar_def.identifier.value, b"Bar");
 
        assert_eq!(bar_def.variants.len(), 3);
 
        assert_eq!(bar_def.variants[0].identifier.value, b"Ayoo");
 
        assert_eq!(bar_def.variants[0].value, EnumVariantValue::None);
 
        assert_eq!(bar_def.variants[1].identifier.value, b"Byoo");
 
        assert_eq!(bar_def.variants[1].value, EnumVariantValue::None);
 
        assert_eq!(bar_def.variants[2].identifier.value, b"Cyoo");
 
        assert_eq!(bar_def.variants[2].value, EnumVariantValue::None);
 

	
 
        let qux_def = h[root.definitions[2]].as_enum();
 
        let enum_type = |value: &EnumVariantValue| -> &ParserType {
 
            if let EnumVariantValue::Type(t) = value {
 
                &h[*t]
 
            } else {
 
                assert!(false);
 
                unreachable!();
 
            }
 
        };
 
        assert_eq!(qux_def.identifier.value, b"Qux");
 
        assert_eq!(qux_def.variants.len(), 3);
 
        assert_eq!(qux_def.variants[0].identifier.value, b"A");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[0].value)), ParserTypeClass::Array);
 
        assert_eq!(qux_def.variants[1].identifier.value, b"B");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[1].value)), ParserTypeClass::Array);
 
        // if let PrimitiveType::Symbolic(t) = &enum_type(&qux_def.variants[1].value).the_type.primitive {
 
        //     assert_eq!(t.identifier.value, Vec::from("Bar".as_bytes()));
 
        // } else { assert!(false) }
 

	
 
        assert_eq!(qux_def.variants[2].identifier.value, b"C");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[2].value)), ParserTypeClass::Byte);
 
    }
 

	
 
//     #[test]
 
//     fn test_lowercase() {
 
//         assert_eq!(lowercase(b'a'), b'a');
 
//         assert_eq!(lowercase(b'A'), b'a');
 
//         assert_eq!(lowercase(b'z'), b'z');
 
//         assert_eq!(lowercase(b'Z'), b'z');
 
//     }
 

	
 
//     #[test]
 
//     fn test_basic_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("a+b;").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:?}", expr);
 
//                 if let Binary(bin) = &h[expr] {
 
//                     if let Variable(left) = &h[bin.left] {
 
//                         if let Variable(right) = &h[bin.right] {
 
//                             assert_eq!("a", format!("{}", h[left.identifier]));
 
//                             assert_eq!("b", format!("{}", h[right.identifier]));
 
//                             assert_eq!(Some(b';'), is.next());
 
//                             return;
 
//                         }
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_paren_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("(true)").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_paren_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:#?}", expr);
 
//                 if let Constant(con) = &h[expr] {
 
//                     if let ast::Constant::True = con.value {
 
//                         return;
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("(x(1+5,get(y))-w[5])+z++\n").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:#?}", expr);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_basic_statement() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("while (true) { skip; }").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_statement(&mut h) {
 
//             Ok(stmt) => {
 
//                 println!("{:#?}", stmt);
 
//                 if let Statement::While(w) = &h[stmt] {
 
//                     if let Expression::Constant(_) = h[w.test] {
 
//                         if let Statement::Block(_) = h[w.body] {
 
//                             return;
 
//                         }
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_statement() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string(
 
//             "label: while (true) { if (x++ > y[0]) break label; else continue; }\n",
 
//         )
 
//         .unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_statement(&mut h) {
 
//             Ok(stmt) => {
 
//                 println!("{:#?}", stmt);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 
}
src/protocol/parser/mod.rs
 
@@ -132,195 +132,195 @@ impl Parser {
 
    }
 

	
 
    fn resolve_symbols_and_types(&mut self) -> Result<(SymbolTable, TypeTable), ParseError2> {
 
        // Construct the symbol table to resolve any imports and/or definitions,
 
        // then use the symbol table to actually annotate all of the imports.
 
        // If the symbol table is constructed correctly then all imports MUST be
 
        // resolvable.
 
        // TODO: Update once namespaced identifiers are implemented
 
        let symbol_table = SymbolTable::new(&self.heap, &self.modules)?;
 

	
 
        // Not pretty, but we need to work around Rust's borrowing rules; it is
 
        // totally safe to mutate the contents of an AST element that we are
 
        // not borrowing anywhere else.
 
        // TODO: Maybe directly access heap's members to allow borrowing from
 
        //  multiple members of Heap? Not pretty though...
 
        let mut module_index = 0;
 
        let mut import_index = 0;
 
        loop {
 
            if module_index >= self.modules.len() {
 
                break;
 
            }
 

	
 
            let module_root_id = self.modules[module_index].root_id;
 
            let import_id = {
 
                let root = &self.heap[module_root_id];
 
                if import_index >= root.imports.len() {
 
                    module_index += 1;
 
                    import_index = 0;
 
                    continue
 
                }
 
                root.imports[import_index]
 
            };
 

	
 
            let import = &mut self.heap[import_id];
 
            match import {
 
                Import::Module(import) => {
 
                    debug_assert!(import.module_id.is_none(), "module import already resolved");
 
                    let target_module_id = symbol_table.resolve_module(&import.module_name)
 
                        .expect("module import is resolved by symbol table");
 
                    import.module_id = Some(target_module_id)
 
                },
 
                Import::Symbols(import) => {
 
                    debug_assert!(import.module_id.is_none(), "module of symbol import already resolved");
 
                    let target_module_id = symbol_table.resolve_module(&import.module_name)
 
                        .expect("symbol import's module is resolved by symbol table");
 
                    import.module_id = Some(target_module_id);
 

	
 
                    for symbol in &mut import.symbols {
 
                        debug_assert!(symbol.definition_id.is_none(), "symbol import already resolved");
 
                        let (_, target_definition_id) = symbol_table.resolve_symbol(module_root_id, &symbol.alias)
 
                            .expect("symbol import is resolved by symbol table")
 
                            .as_definition()
 
                            .expect("symbol import does not resolve to namespace symbol");
 
                        symbol.definition_id = Some(target_definition_id);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // All imports in the AST are now annotated. We now use the symbol table
 
        // to construct the type table.
 
        let mut type_ctx = TypeCtx::new(&symbol_table, &mut self.heap, &self.modules);
 
        let type_table = TypeTable::new(&mut type_ctx)?;
 

	
 
        Ok((symbol_table, type_table))
 
    }
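
    // Illustrative sketch of the scoped-borrow/index pattern used above; not
    // part of the compiler. The borrow of an element is confined to a block
    // that only extracts the data we need, so the subsequent mutation does
    // not conflict with it. `Item` is invented for the example.
    fn _scoped_borrow_sketch() {
        struct Item { name: String, resolved: bool }

        let mut items = vec![
            Item { name: String::from("first"), resolved: false },
            Item { name: String::from("second"), resolved: false },
        ];

        let mut index = 0;
        loop {
            if index >= items.len() {
                break;
            }
            // Immutable borrow ends at the closing brace of this block.
            let name_len = {
                let item = &items[index];
                item.name.len()
            };
            // Mutable access is allowed again afterwards.
            items[index].resolved = name_len > 0;
            index += 1;
        }

        assert!(items.iter().all(|item| item.resolved));
    }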
 

	
 
    // TODO: @fix, temporary impl to keep code compilable
 
    pub fn parse(&mut self) -> Result<RootId, ParseError2> {
 
        assert_eq!(self.modules.len(), 1, "Fix meeeee");
 
        let root_id = self.modules[0].root_id;
 

	
 
        let (mut symbol_table, mut type_table) = self.resolve_symbols_and_types()?;
 

	
 
        // TODO: @cleanup
 
        let mut ctx = visitor::Ctx{
 
            heap: &mut self.heap,
 
            module: &self.modules[0],
 
            symbols: &mut symbol_table,
 
            types: &mut type_table,
 
        };
 
        let mut visit = ValidityAndLinkerVisitor::new();
 
        visit.visit_module(&mut ctx)?;
 
        let mut type_visit = TypeResolvingVisitor::new();
 
        let mut queue = ResolveQueue::new();
 
        TypeResolvingVisitor::queue_module_definitions(&ctx, &mut queue);
 
        while !queue.is_empty() {
 
            let top = queue.pop().unwrap();
 
            println!("Resolving root={}, def={}, mono={:?}", top.root_id.index, top.definition_id.index, top.monomorph_types);
 
            type_visit.handle_module_definition(&mut ctx, &mut queue, top)?;
 
        }
 

	
 
        if let Err((position, message)) = Self::parse_inner(&mut self.heap, root_id) {
 
            return Err(ParseError2::new_error(&self.modules[0].source, position, &message))
 
        }
 

	
 
        let mut writer = ASTWriter::new();
 
        let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        writer.write_ast(&mut file, &self.heap);
 
        // let mut writer = ASTWriter::new();
 
        // let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        // writer.write_ast(&mut file, &self.heap);
 

	
 
        Ok(root_id)
 
    }
 

	
 
    pub fn parse_inner(h: &mut Heap, pd: RootId) -> VisitorResult {
 
        // TODO: @cleanup, slowly phasing out old compiler
 
        // NestedSynchronousStatements::new().visit_protocol_description(h, pd)?;
 
        // ChannelStatementOccurrences::new().visit_protocol_description(h, pd)?;
 
        // FunctionStatementReturns::new().visit_protocol_description(h, pd)?;
 
        // ComponentStatementReturnNew::new().visit_protocol_description(h, pd)?;
 
        // CheckBuiltinOccurrences::new().visit_protocol_description(h, pd)?;
 
        // BuildSymbolDeclarations::new().visit_protocol_description(h, pd)?;
 
        // LinkCallExpressions::new().visit_protocol_description(h, pd)?;
 
        // BuildScope::new().visit_protocol_description(h, pd)?;
 
        // ResolveVariables::new().visit_protocol_description(h, pd)?;
 
        LinkStatements::new().visit_protocol_description(h, pd)?;
 
        // BuildLabels::new().visit_protocol_description(h, pd)?;
 
        // ResolveLabels::new().visit_protocol_description(h, pd)?;
 
        AssignableExpressions::new().visit_protocol_description(h, pd)?;
 
        IndexableExpressions::new().visit_protocol_description(h, pd)?;
 
        SelectableExpressions::new().visit_protocol_description(h, pd)?;
 

	
 
        Ok(())
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use std::fs::File;
 
    use std::io::Read;
 
    use std::path::Path;
 

	
 
    use super::*;
 

	
 
    // #[test]
 
    fn positive_tests() {
 
        for resource in TestFileIter::new("testdata/parser/positive", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            // println!(" * running: {}", &resource);
 
            let path = Path::new(&resource);
 
            let source = InputSource::from_file(&path).unwrap();
 
            // println!("DEBUG -- input:\n{}", String::from_utf8_lossy(&source.input));
 
            let mut parser = Parser::new_with_source(source).expect("parse source");
 
            match parser.parse() {
 
                Ok(_) => {}
 
                Err(err) => {
 
                    println!(" > file: {}", &resource);
 
                    println!("{}", err);
 
                    assert!(false);
 
                }
 
            }
 
        }
 
    }
 

	
 
    // #[test]
 
    fn negative_tests() {
 
        for resource in TestFileIter::new("testdata/parser/negative", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            let path = Path::new(&resource);
 
            let expect = path.with_extension("txt");
 
            let mut source = InputSource::from_file(&path).unwrap();
 
            let mut parser = Parser::new_with_source(source).expect("construct parser");
 
            match parser.parse() {
 
                Ok(pd) => {
 
                    println!("Expected parse error:");
 

	
 
                    let mut cev: Vec<u8> = Vec::new();
 
                    let mut f = File::open(expect).unwrap();
 
                    f.read_to_end(&mut cev).unwrap();
 
                    println!("{}", String::from_utf8_lossy(&cev));
 
                    assert!(false);
 
                }
 
                Err(err) => {
 
                    let expected = format!("{}", err);
 
                    println!("{}", &expected);
 

	
 
                    let mut cev: Vec<u8> = Vec::new();
 
                    let mut f = File::open(expect).unwrap();
 
                    f.read_to_end(&mut cev).unwrap();
 
                    println!("{}", String::from_utf8_lossy(&cev));
 

	
 
                    assert_eq!(expected.as_bytes(), cev);
 
                }
 
            }
 
        }
 
    }
 

	
 
    // #[test]
 
    fn counterexample_tests() {
 
        for resource in TestFileIter::new("testdata/parser/counterexamples", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            let path = Path::new(&resource);
 
            let source = InputSource::from_file(&path).unwrap();
 
            let mut parser = Parser::new_with_source(source).expect("construct parser");
 

	
 
            fn print_header(s: &str) {
src/protocol/parser/type_resolver.rs
 
/// type_resolver.rs
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure, inserting an initial type for each and constraining it based on context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter case we return an
 
/// error.
 
///
 
/// Inference may be applied on non-polymorphic procedures and on polymorphic
 
/// procedures. When dealing with a non-polymorphic procedure we apply the type
 
/// resolver and annotate the AST with the `ConcreteType`s. When dealing with
 
/// polymorphic procedures we will only annotate the AST once, preserving
 
/// references to polymorphic variables. Any later pass will perform just the
 
/// type checking.
 
///
 
/// TODO: Needs an optimization pass
 
/// TODO: Needs a cleanup pass
 
/// TODO: Disallow `Void` types in various expressions (and other future types)
 
/// TODO: Maybe remove msg type?
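
// The worklist approach described above can be illustrated with a small,
// self-contained sketch. This is not part of the compiler: `Slot`, `merge`
// and the constraint list below are invented stand-ins for the real
// `InferenceType` machinery that follows in this file.
fn _worklist_inference_sketch() {
    use std::collections::VecDeque;

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Slot { Unknown, IntegerLike, Int }

    // Returns the more specific of two compatible slots.
    fn merge(a: Slot, b: Slot) -> Option<Slot> {
        match (a, b) {
            (Slot::Unknown, x) | (x, Slot::Unknown) => Some(x),
            (Slot::IntegerLike, x) | (x, Slot::IntegerLike) => Some(x),
            (x, y) if x == y => Some(x),
            _ => None,
        }
    }

    // Three "expressions": an integer literal, an untyped variable and a
    // declaration annotated as `int`. The constraints force them to be equal.
    let mut slots = vec![Slot::IntegerLike, Slot::Unknown, Slot::Int];
    let constraints = [(0usize, 1usize), (1, 2)];

    // Visit every constraint once; whenever a slot becomes more specific,
    // requeue the constraints touching it, until no progress remains.
    let mut queue: VecDeque<usize> = (0..constraints.len()).collect();
    while let Some(ci) = queue.pop_front() {
        let (a, b) = constraints[ci];
        let merged = merge(slots[a], slots[b]).expect("type error");
        for &idx in &[a, b] {
            if slots[idx] != merged {
                slots[idx] = merged;
                queue.extend((0..constraints.len()).filter(|&c| {
                    constraints[c].0 == idx || constraints[c].1 == idx
                }));
            }
        }
    }

    assert!(slots.iter().all(|s| *s == Slot::Int));
}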
 

	
 
macro_rules! enabled_debug_print {
 
    (false, $name:literal, $format:literal) => {};
 
    (false, $name:literal, $format:literal, $($args:expr),*) => {};
 
    (true, $name:literal, $format:literal) => {
 
        println!("[{}] {}", $name, $format)
 
    };
 
    (true, $name:literal, $format:literal, $($args:expr),*) => {
 
        println!("[{}] {}", $name, format!($format, $($args),*))
 
    };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "types", $format, $($args),*);
 
    };
 
}
 

	
 
use std::collections::{HashMap, HashSet, VecDeque};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::inputsource::*;
 
use super::type_table::*;
 
use super::symbol_table::*;
 
use super::visitor::{
 
    STMT_BUFFER_INIT_CAPACITY,
 
    EXPR_BUFFER_INIT_CAPACITY,
 
    Ctx,
 
    Visitor2,
 
    VisitorResult
 
};
 
use std::collections::hash_map::Entry;
 
use crate::protocol::parser::type_resolver::InferenceTypePart::IntegerLike;
 

	
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::Byte ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 
const PORTLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::PortLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // A marker with an identifier which we can use to retrieve the type subtree
 
    // that follows the marker. This is used to perform type inference on
 
    // polymorphs: an expression may determine the polymorph's type, after which we
 
    // need to apply that information to all other places where the polymorph is
 
    // used.
 
    MarkerDefinition(usize), // marker for polymorph types on a procedure's definition
 
    MarkerBody(usize), // marker for polymorph types within a procedure body
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, usize)
 
}
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        match self {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete; markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | 
 
            ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        // TODO: @float
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_msg_array_or_slice(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_msg_array_or_slice()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long | 
 
            ITP::String => {
 
                -1
 
            },
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Instance(_, num_args) => {
 
                (*num_args as i32) - 1
 
            }
 
        }
 
    }
 
}
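
// Illustrative example of the "one subtype"/depth-change bookkeeping above;
// not part of the compiler. An input port carrying `int[]` would be stored
// depth-first as `[Input, Array, Int]`, and a subtree is skipped by adding
// `number_of_subtypes - 1` per part, mirroring `depth_change` and
// `InferenceType::find_subtree_end_idx`. The `Part` enum is invented here.
fn _subtree_end_sketch() {
    enum Part { Input, Array, Int, Bool }

    // Number of subtypes minus one, as in `depth_change` above.
    fn depth_change(p: &Part) -> i32 {
        match p {
            Part::Input | Part::Array => 0, // one subtype
            Part::Int | Part::Bool => -1,   // no subtypes
        }
    }

    // Exclusive end index of the subtree rooted at `start`.
    fn subtree_end(parts: &[Part], start: usize) -> usize {
        let mut depth = 1;
        for idx in start..parts.len() {
            depth += depth_change(&parts[idx]);
            if depth == 0 {
                return idx + 1;
            }
        }
        unreachable!("malformed part list");
    }

    // An input port of an array of ints, followed by an unrelated bool.
    let parts = [Part::Input, Part::Array, Part::Int, Part::Bool];
    assert_eq!(subtree_end(&parts, 0), 3); // whole Input(Array(Int)) subtree
    assert_eq!(subtree_end(&parts, 1), 3); // just the nested Array(Int)
    assert_eq!(subtree_end(&parts, 3), 4); // the trailing Bool
}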
 

	
 
impl From<ConcreteTypePart> for InferenceTypePart {
 
    fn from(v: ConcreteTypePart) -> InferenceTypePart {
 
        use ConcreteTypePart as CTP;
 
        use InferenceTypePart as ITP;
 

	
 
        match v {
 
            CTP::Marker(_) => {
 
                unreachable!("encountered marker while converting concrete type to inferred type");
 
            }
 
            CTP::Void => ITP::Void,
 
            CTP::Message => ITP::Message,
 
            CTP::Bool => ITP::Bool,
 
            CTP::Byte => ITP::Byte,
 
            CTP::Short => ITP::Short,
 
            CTP::Int => ITP::Int,
 
            CTP::Long => ITP::Long,
 
            CTP::String => ITP::String,
 
            CTP::Array => ITP::Array,
 
            CTP::Slice => ITP::Slice,
 
            CTP::Input => ITP::Input,
 
            CTP::Output => ITP::Output,
 
            CTP::Instance(id, num) => ITP::Instance(id, num),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug)]
 
struct InferenceType {
 
    has_body_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    fn new(has_body_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        if cfg!(debug_assertions) {
 
            debug_assert!(!parts.is_empty());
 
            if !has_body_marker {
 
                debug_assert!(parts.iter().all(|v| {
 
                    if let InferenceTypePart::MarkerBody(_) = v { false } else { true }
 
                }));
 
            }
 
            if is_done {
 
                debug_assert!(parts.iter().all(|v| v.is_concrete()));
 
            }
 
        }
 
        Self{ has_body_marker, is_done, parts }
 
    }
 

	
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
         let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
        self.parts.splice(start_idx..end_idx, with.iter().cloned());
 
        self.recompute_is_done();
 
    }
 

	
 
    // TODO: @performance, might all be done inline in the type inference methods
 
    fn recompute_is_done(&mut self) {
 
        self.is_done = self.parts.iter().all(|v| v.is_concrete());
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, a number
 
    // TODO: @float
 
    fn might_be_number(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long =>
 
                true,
 
            _ =>
 
                false,
 
        }
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, an integer
 
    fn might_be_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::IntegerLike |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long =>
 
                true,
 
            _ =>
 
                false,
 
        }
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, a boolean
 
    fn might_be_boolean(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::Bool => true,
 
            _ => false
 
        }
 
    }
 

	
 
    /// Returns an iterator over all body markers and the partial type tree that
 
    /// follows those markers.
 
    fn body_marker_iter(&self) -> InferenceTypeMarkerIter {
 
        InferenceTypeMarkerIter::new(&self.parts)
 
    }
 

	
 
    /// Attempts to find a specific type part appearing at or after the
 
    /// specified index. If found then the partial type tree's bounding indices
 
    /// that follow that part are returned.
 
    fn find_subtree_idx_for_part(&self, part: InferenceTypePart, mut idx: usize) -> Option<(usize, usize)> {
 
        debug_assert!(part.depth_change() >= 0, "cannot find subtree for leaf part");
 
        while idx < self.parts.len() {
 
            if part == self.parts[idx] {
 
                // Found the specified part
 
                let start_idx = idx + 1;
 
                let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
                return Some((start_idx, end_idx))
 
            }
 

	
 
            idx += 1;
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Given that the `parts` are a depth-first serialized tree of types, this
    /// function finds the exclusive end index of the subtree anchored at
    /// `start_idx`.
 
    fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize {
 
        let mut depth = 1;
 
        let mut idx = start_idx;
 

	
 
        while idx < parts.len() {
 
            depth += parts[idx].depth_change();
 
            if depth == 0 {
 
                return idx + 1;
 
            }
 
            idx += 1;
 
        }
 

	
 
        // If here, then the inference type is malformed
 
        unreachable!();
 
    }
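    // For illustration, a sketch of the depth walk above, assuming a one-child
    // constructor such as `Array` contributes 0 to `depth_change()` and a leaf
    // such as `Int` contributes -1. With `parts = [Array, Array, Int]` (which
    // would encode `int[][]`) the depth goes 1 -> 1 -> 1 -> 0, so:
    //
    //     assert_eq!(InferenceType::find_subtree_end_idx(&parts, 0), 3);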
 

	
 
    /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` 
 
    /// using the subtree at `template.parts[template_idx]`. Will return 
 
    /// `Some(depth_change_due_to_traversal)` if type inference has been 
 
    /// applied. In this case the indices will also be modified to point to the 
 
    /// next part in both templates. If type inference has not (or: could not) 
 
    /// be applied then `None` will be returned. Note that this might mean that 
 
    /// the types are incompatible.
 
    ///
 
    /// As this is a helper function, it makes some assumptions: the parts are
    /// not exactly equal, and neither of them is a marker. Also: only the
    /// `to_infer` parts are checked for inference. It might be that this
    /// function returns `None` while `template` is still compatible with
    /// `to_infer`, e.g. when `template` has an `Unknown` part.
 
    fn infer_part_for_single_type(
 
        to_infer: &mut InferenceType, to_infer_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize,
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_infer_part = &to_infer.parts[*to_infer_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // TODO: Maybe do this differently?
 
        let mut template_definition_marker = None;
 
        if *template_idx > 0 {
 
            if let ITP::MarkerDefinition(marker) = &template_parts[*template_idx - 1] {
 
                template_definition_marker = Some(*marker)
 
            }
 
        }
 

	
 
        // Check for programmer mistakes
 
        debug_assert_ne!(to_infer_part, template_part);
 
        debug_assert!(!to_infer_part.is_marker(), "marker encountered in 'infer part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        // Inference of a somewhat-specified type
 
        if to_infer_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_infer_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 

	
 
            if let Some(marker) = template_definition_marker {
 
                to_infer.parts.insert(*to_infer_idx, ITP::MarkerDefinition(marker));
 
                *to_infer_idx += 1;
 
            }
 

	
 
            to_infer.parts[*to_infer_idx] = template_part.clone();
 

	
 
            *to_infer_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        // Inference of a completely unknown type
 
        if *to_infer_part == ITP::Unknown {
 
            // template part is different, so cannot be unknown, hence copy the
 
            // entire subtree
 
            let template_end_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 
            let erase_offset = if let Some(marker) = template_definition_marker {
                to_infer.parts[*to_infer_idx] = ITP::MarkerDefinition(marker);
                *to_infer_idx += 1;
 
                0
 
            } else {
 
                1
 
            };
 

	
 
            to_infer.parts.splice(
 
                *to_infer_idx..*to_infer_idx + erase_offset,
 
                template_parts[*template_idx..template_end_idx].iter().cloned()
 
            );
 
            *to_infer_idx += template_end_idx - *template_idx;
 
            *template_idx = template_end_idx;
 

	
 
            // Note: by definition the LHS was Unknown and the RHS traversed a 
 
            // full subtree.
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
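    // For illustration (assuming the `Array`/`Int` parts behave as above): with
    // `to_infer.parts == [Array, Unknown]` and `template_parts == [Array, Int]`,
    // the caller first skips the equal `Array` parts; this function then hits
    // the `Unknown` branch, splices the template subtree into `to_infer` so that
    // its parts become `[Array, Int]`, and returns `Some(-1)`.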
 

	
 
    /// Call that checks if the `to_check` part is compatible with the
    /// `template` part. This is essentially a copy of
    /// `infer_part_for_single_type`, but without actually copying the type
    /// parts.
 
    fn check_part_for_single_type(
 
        to_check_parts: &[InferenceTypePart], to_check_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_check_part = &to_check_parts[*to_check_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Checking programmer errors
 
        debug_assert_ne!(to_check_part, template_part);
 
        debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        if to_check_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_check_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 
            *to_check_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        if *to_check_part == ITP::Unknown {
 
            *to_check_idx += 1;
 
            *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 

	
 
            // By definition LHS and RHS had depth change of -1
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Attempts to infer types between two `InferenceType` instances. This 
 
    /// function is unsafe as it accepts pointers to work around Rust's 
 
    /// borrowing rules. The caller must ensure that the pointers are distinct.
 
    unsafe fn infer_subtrees_for_both_types(
 
        type_a: *mut InferenceType, start_idx_a: usize,
 
        type_b: *mut InferenceType, start_idx_b: usize
 
    ) -> DualInferenceResult {
 

	
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "same inference types");
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "encountered pointers to the same inference type");
 
        let type_a = &mut *type_a;
 
        let type_b = &mut *type_b;
 

	
 
        let mut modified_a = false;
 
        let mut modified_b = false;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            // Advance indices if we encounter markers or equal parts
 
            let part_a = &type_a.parts[idx_a];
 
            let part_b = &type_b.parts[idx_b];
 
            
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            // Types are not equal and are both not markers
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_a, &mut idx_a, &type_b.parts, &mut idx_b) {
 
                depth += depth_change;
 
                modified_a = true;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_b, &mut idx_b, &type_a.parts, &mut idx_a) {
 
                depth += depth_change;
 
                modified_b = true;
 
                continue;
 
            }
 

	
 
            // They can also not be inferred in either direction: the types must be incompatible
 
            return DualInferenceResult::Incompatible;
 
        }
 

	
 
        if modified_a { type_a.recompute_is_done(); }
 
        if modified_b { type_b.recompute_is_done(); }
 

	
 
        // If here then we completely inferred the subtrees.
 
        match (modified_a, modified_b) {
 
            (false, false) => DualInferenceResult::Neither,
 
            (false, true) => DualInferenceResult::Second,
 
            (true, false) => DualInferenceResult::First,
 
            (true, true) => DualInferenceResult::Both
 
        }
 
    }
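    // For illustration: calling this on `[Array, Unknown]` and `[Array, Int]`
    // (both starting at index 0) rewrites the first type to `[Array, Int]` and
    // returns `DualInferenceResult::First`; if neither type needed modification
    // the result is `DualInferenceResult::Neither`.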
 

	
 
    /// Attempts to infer the first subtree based on the template. Like
 
    /// `infer_subtrees_for_both_types`, but now only applying inference to
 
    /// `to_infer` based on the type information in `template`.
 
    /// A secondary use is to make sure that a type follows a certain template.
 
    fn infer_subtree_for_single_type(
 
        to_infer: &mut InferenceType, mut to_infer_idx: usize,
 
        template: &[InferenceTypePart], mut template_idx: usize,
 
    ) -> SingleInferenceResult {
 
        let mut modified = false;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            let to_infer_part = &to_infer.parts[to_infer_idx];
 
            let template_part = &template[template_idx];
 

	
 
            if to_infer_part == template_part {
 
                let depth_change = to_infer_part.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, template_part.depth_change());
 
                to_infer_idx += 1;
 
                template_idx += 1;
 
                continue;
 
            }
 
            if to_infer_part.is_marker() { to_infer_idx += 1; continue; }
 
            if template_part.is_marker() { template_idx += 1; continue; }
 

	
 
            // Types are not equal and not markers. So check if we can infer 
 
            // anything
 
            if let Some(depth_change) = Self::infer_part_for_single_type(
 
                to_infer, &mut to_infer_idx, template, &mut template_idx
 
            ) {
 
                depth += depth_change;
 
                modified = true;
 
                continue;
 
            }
 

	
 
            // We cannot infer anything, but the template may still be 
 
            // compatible with the type we're inferring
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                template, &mut template_idx, &to_infer.parts, &mut to_infer_idx
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return SingleInferenceResult::Incompatible
 
        }
 

	
 
        return if modified {
 
            to_infer.recompute_is_done();
 
            SingleInferenceResult::Modified
 
        } else {
 
            SingleInferenceResult::Unmodified
 
        }
 
    }
 

	
 
    /// Checks if both types are compatible, doesn't perform any inference
 
    fn check_subtrees(
 
        type_parts_a: &[InferenceTypePart], start_idx_a: usize,
 
        type_parts_b: &[InferenceTypePart], start_idx_b: usize
 
    ) -> bool {
 
        let mut depth = 1;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 

	
 
        while depth > 0 {
 
            let part_a = &type_parts_a[idx_a];
 
            let part_b = &type_parts_b[idx_b];
 

	
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_a, &mut idx_a, type_parts_b, &mut idx_b
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_b, &mut idx_b, type_parts_a, &mut idx_a
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return false;
 
        }
 

	
 
        true
 
    }
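    // For illustration: checking `[Array, IntegerLike]` against `[Array, Int]`
    // returns true, because `IntegerLike` may still be inferred as `Int`;
    // checking `[Array, Bool]` against `[Array, Int]` returns false.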
 

	
 
    /// Performs the conversion of the inference type into a concrete type.
 
    /// Before calling this function you must make sure that no unspecified types
 
    /// (e.g. Unknown or IntegerLike) exist in the type.
 
    fn write_concrete_type(&self, concrete_type: &mut ConcreteType) {
 
        use InferenceTypePart as ITP;
 
        use ConcreteTypePart as CTP;
 

	
 
        // Make sure inference type is specified but concrete type is not yet specified
 
        debug_assert!(!self.parts.is_empty());
 
        debug_assert!(concrete_type.parts.is_empty());
 
        concrete_type.parts.reserve(self.parts.len());
 

	
 
        let mut idx = 0;
 
        while idx < self.parts.len() {
 
            let part = &self.parts[idx];
 
            let converted_part = match part {
 
                ITP::MarkerDefinition(marker) => {
 
                    // Definition markers are converted to regular markers; the
                    // type subtree that follows them is skipped entirely
 
                    idx = InferenceType::find_subtree_end_idx(&self.parts, idx + 1);
 
                    concrete_type.parts.push(CTP::Marker(*marker));
 
                    continue;
 
                },
 
                ITP::MarkerBody(_) => {
 
                    // Body markers are dropped when writing to the concrete
                    // type.
 
                    idx += 1;
 
                    continue;
 
                },
 
                ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => {
 
                    debug_assert!(false, "Attempted to convert inference type part {:?} into concrete type", part);
 
                    unreachable!();
 
                    unreachable!("Attempted to convert inference type part {:?} into concrete type", part);
 
                },
 
                ITP::Void => CTP::Void,
 
                ITP::Message => CTP::Message,
 
                ITP::Bool => CTP::Bool,
 
                ITP::Byte => CTP::Byte,
 
                ITP::Short => CTP::Short,
 
                ITP::Int => CTP::Int,
 
                ITP::Long => CTP::Long,
 
                ITP::String => CTP::String,
 
                ITP::Array => CTP::Array,
 
                ITP::Slice => CTP::Slice,
 
                ITP::Input => CTP::Input,
 
                ITP::Output => CTP::Output,
 
                ITP::Instance(id, num) => CTP::Instance(*id, *num),
 
            };
 

	
 
            concrete_type.parts.push(converted_part);
 
            idx += 1;
 
        }
 
    }
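    // For illustration: the parts `[MarkerBody(0), Array, Int]` are written as
    // `[CTP::Array, CTP::Int]` (body markers are dropped), while
    // `[MarkerDefinition(1), Int]` is written as just `[CTP::Marker(1)]`,
    // because the subtree behind a definition marker is skipped entirely.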
 

	
 
    /// Writes a human-readable version of the type to a string. Mostly a
 
    /// function for internal use.
 
    fn write_display_name(
 
        buffer: &mut String, heap: &Heap, parts: &[InferenceTypePart], mut idx: usize
 
    ) -> usize {
 
        use InferenceTypePart as ITP;
 

	
 
        match &parts[idx] {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1)
 
            },
 
            ITP::Unknown => buffer.push_str("?"),
 
            ITP::NumberLike => buffer.push_str("num?"),
 
            ITP::IntegerLike => buffer.push_str("int?"),
 
            ITP::ArrayLike => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[?]");
 
            },
 
            ITP::PortLike => {
 
                buffer.push_str("port?<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            }
 
            ITP::Void => buffer.push_str("void"),
 
            ITP::Message => buffer.push_str("msg"),
 
            ITP::Bool => buffer.push_str("bool"),
 
            ITP::Byte => buffer.push_str("byte"),
 
            ITP::Short => buffer.push_str("short"),
 
            ITP::Int => buffer.push_str("int"),
 
            ITP::Long => buffer.push_str("long"),
 
            ITP::String => buffer.push_str("str"),
 
            ITP::Message => {
 
                buffer.push_str("msg<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Array => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            ITP::Slice => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            ITP::Input => {
 
                buffer.push_str("in<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Output => {
 
                buffer.push_str("out<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Instance(definition_id, num_sub) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(&String::from_utf8_lossy(&definition.identifier().value));
 
                if *num_sub > 0 {
 
                    buffer.push('<');
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            },
 
        }
 

	
 
        idx
 
    }
 

	
 
    /// Returns the display name of a (part of) the type tree. Will allocate a
 
    /// string.
 
    fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String {
 
        let mut buffer = String::with_capacity(parts.len() * 6);
 
        Self::write_display_name(&mut buffer, heap, parts, 0);
 
        buffer
 
    }
 

	
 
    /// Returns the display name of the full type tree. Will allocate a string.
 
    fn display_name(&self, heap: &Heap) -> String {
 
        Self::partial_display_name(heap, &self.parts)
 
    }
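    // For illustration, names produced above: `[Input, String]` is displayed as
    // `in<str>`, `[Array, IntegerLike]` as `int?[]`, and a completely unknown
    // type as `?`.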
 
}
 

	
 
/// Iterator over the subtrees that follow a body marker in an `InferenceType`
/// instance. Returns immutable slices over the internal parts.
 
struct InferenceTypeMarkerIter<'a> {
 
    parts: &'a [InferenceTypePart],
 
    idx: usize,
 
}
 

	
 
impl<'a> InferenceTypeMarkerIter<'a> {
 
    fn new(parts: &'a [InferenceTypePart]) -> Self {
 
        Self{ parts, idx: 0 }
 
    }
 
}
 

	
 
impl<'a> Iterator for InferenceTypeMarkerIter<'a> {
 
    type Item = (usize, &'a [InferenceTypePart]);
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Iterate until we find a marker
 
        while self.idx < self.parts.len() {
 
            if let InferenceTypePart::MarkerBody(marker) = self.parts[self.idx] {
 
                // Found a marker, find the subtree end
 
                let start_idx = self.idx + 1;
 
                let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx);
 

	
 
                // Modify internal index, then return items
 
                self.idx = end_idx;
 
                return Some((marker, &self.parts[start_idx..end_idx]))
 
            }
 

	
 
            self.idx += 1;
 
        }
 

	
 
        None
 
    }
 
}
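// For illustration: iterating the parts `[Input, MarkerBody(0), Array, Int]`
// yields a single item `(0, &[Array, Int])`: the marker value together with
// the slice of the subtree that follows it.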
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum DualInferenceResult {
 
    Neither,        // neither argument is clarified
 
    First,          // first argument is clarified using the second one
 
    Second,         // second argument is clarified using the first one
 
    Both,           // both arguments are clarified
 
    Incompatible,   // types are incompatible: programmer error
 
}
 

	
 
impl DualInferenceResult {
 
    fn modified_any(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_lhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_rhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum SingleInferenceResult {
 
    Unmodified,
 
    Modified,
 
    Incompatible
 
}
 

	
 
enum DefinitionType{
 
    None,
 
    Component(ComponentId),
 
    Function(FunctionId),
 
}
 

	
 
pub(crate) struct ResolveQueueElement {
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) monomorph_types: Vec<ConcreteType>,
 
}
 

	
 
pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensure
/// that all expressions have the appropriate types.
 
pub(crate) struct TypeResolvingVisitor {
 
    // Current definition we're typechecking.
 
    definition_type: DefinitionType,
 
    poly_vars: Vec<ConcreteType>,
 

	
 
    // Buffers for iteration over substatements and subexpressions
 
    stmt_buffer: Vec<StatementId>,
 
    expr_buffer: Vec<ExpressionId>,
 

	
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    var_types: HashMap<VariableId, VarData>,      // types of variables
 
    expr_types: HashMap<ExpressionId, InferenceType>,   // types of expressions
 
    extra_data: HashMap<ExpressionId, ExtraData>,       // data for function call inference
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progress on an associated type
 
    expr_queued: HashSet<ExpressionId>,
 
}
 

	
 
// TODO: @rename used for calls and struct literals, maybe union literals?
 
struct ExtraData {
 
    /// Progression of polymorphic variables (if any)
 
    poly_vars: Vec<InferenceType>,
 
    /// Progression of types of call arguments or struct members
 
    embedded: Vec<InferenceType>,
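    /// Progression of the type returned by the call (i.e. the type of the
    /// expression itself)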
 
    returned: InferenceType,
 
}
 

	
 
struct VarData {
 
    /// Type of the variable
 
    var_type: InferenceType,
 
    /// VariableExpressions that use the variable
 
    used_at: Vec<ExpressionId>,
 
    /// For channel statements we link to the other variable such that when one
 
    /// channel's interior type is resolved, we can also resolve the other one.
 
    linked_var: Option<VariableId>,
 
}
 

	
 
impl VarData {
 
    fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
 
    }
 
    fn new_local(var_type: InferenceType) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: None }
 
    }
 
}
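// For illustration (using a hypothetical `channel a -> b` statement): the
// visitor below inserts both ports with `VarData::new_channel`, each linking to
// the other, so progress on one port's payload type can immediately be
// propagated to the other:
//
//     var_types.insert(a_id, VarData::new_channel(a_type, b_id));
//     var_types.insert(b_id, VarData::new_channel(b_type, a_id));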
 

	
 
impl TypeResolvingVisitor {
 
    pub(crate) fn new() -> Self {
 
        TypeResolvingVisitor{
 
            definition_type: DefinitionType::None,
 
            poly_vars: Vec::new(),
 
            stmt_buffer: Vec::with_capacity(STMT_BUFFER_INIT_CAPACITY),
 
            expr_buffer: Vec::with_capacity(EXPR_BUFFER_INIT_CAPACITY),
 
            var_types: HashMap::new(),
 
            expr_types: HashMap::new(),
 
            extra_data: HashMap::new(),
 
            expr_queued: HashSet::new(),
 
        }
 
    }
 

	
 
    // TODO: @cleanup Unsure about this, maybe a pattern will arise after
 
    //  a while.
 
    pub(crate) fn queue_module_definitions(ctx: &Ctx, queue: &mut ResolveQueue) {
 
        let root_id = ctx.module.root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 
            match definition {
 
                Definition::Function(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Component(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Enum(_) | Definition::Struct(_) => {},
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        // Visit the definition
 
        debug_assert_eq!(ctx.module.root_id, element.root_id);
 
        self.reset();
 
        self.poly_vars.clear();
 
        self.poly_vars.extend(element.monomorph_types.iter().cloned());
 
        self.visit_definition(ctx, element.definition_id)?;
 

	
 
        // Keep resolving types
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.definition_type = DefinitionType::None;
 
        self.poly_vars.clear();
 
        self.stmt_buffer.clear();
 
        self.expr_buffer.clear();
 
        self.var_types.clear();
 
        self.expr_types.clear();
 
        self.extra_data.clear();
 
        self.expr_queued.clear();
 
    }
 
}
 

	
 
impl Visitor2 for TypeResolvingVisitor {
 
    // Definitions
 

	
 
    fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Component(id);
 

	
 
        let comp_def = &ctx.heap[id];
 
        debug_assert_eq!(comp_def.poly_vars.len(), self.poly_vars.len(), "component polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting component '{}': {}", &String::from_utf8_lossy(&comp_def.identifier.value), id.0.index);
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in comp_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type(ctx, param.parser_type, true);
 
            debug_assert!(var_type.is_done, "expected component arguments to be concrete types");
 
            self.var_types.insert(param_id.upcast(), VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Function(id);
 

	
 
        let func_def = &ctx.heap[id];
 
        debug_assert_eq!(func_def.poly_vars.len(), self.poly_vars.len(), "function polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting function '{}': {}", &String::from_utf8_lossy(&func_def.identifier.value), id.0.index);
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in func_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type(ctx, param.parser_type, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_types.insert(param_id.upcast(), VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Transfer statements for traversal
 
        let block = &ctx.heap[id];
 

	
 
        for stmt_id in block.statements.clone() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 

	
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type(ctx, local.parser_type, true);
 
        self.var_types.insert(memory_stmt.variable.upcast(), VarData::new_local(var_type));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type(ctx, from_local.parser_type, true);
 
        self.var_types.insert(from_local.this.upcast(), VarData::new_channel(from_var_type, channel_stmt.to.upcast()));
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type(ctx, to_local.parser_type, true);
 
        self.var_types.insert(to_local.this.upcast(), VarData::new_channel(to_var_type, channel_stmt.from.upcast()));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_id = if_stmt.true_body;
 
        let false_body_id = if_stmt.false_body;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, true_body_id)?;
 
        self.visit_stmt(ctx, false_body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
        let while_stmt = &ctx.heap[id];
 

	
 
        let body_id = while_stmt.body;
 
        let test_expr_id = while_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        let sync_stmt = &ctx.heap[id];
 
        let body_id = sync_stmt.body;
 

	
 
        self.visit_stmt(ctx, body_id)
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        let return_stmt = &ctx.heap[id];
 
        let expr_id = return_stmt.expression;
 

	
 
        self.visit_expr(ctx, expr_id)
 
    }
 

	
 
    fn visit_assert_stmt(&mut self, ctx: &mut Ctx, id: AssertStatementId) -> VisitorResult {
 
        let assert_stmt = &ctx.heap[id];
 
        let test_expr_id = assert_stmt.expression;
 

	
 
        self.visit_expr(ctx, test_expr_id)
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        let new_stmt = &ctx.heap[id];
 
        let call_expr_id = new_stmt.expression;
 

	
 
        self.visit_call_expr(ctx, call_expr_id)
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_stmt = &ctx.heap[id];
 
        let subexpr_id = expr_stmt.expression;
 

	
 
        self.visit_expr(ctx, subexpr_id)
 
    }
 

	
 
    // Expressions
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let assign_expr = &ctx.heap[id];
 
        let left_expr_id = assign_expr.left;
 
        let right_expr_id = assign_expr.right;
 

	
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.visit_expr(ctx, right_expr_id)?;
 

	
 
        self.progress_assignment_expr(ctx, id)
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let conditional_expr = &ctx.heap[id];
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
@@ -1124,269 +1179,269 @@ impl Visitor2 for TypeResolvingVisitor {
 
        let select_expr = &ctx.heap[id];
 
        let subject_expr_id = select_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_select_expr(ctx, id)
 
    }
 

	
 
    fn visit_array_expr(&mut self, ctx: &mut Ctx, id: ArrayExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let array_expr = &ctx.heap[id];
 
        // TODO: @performance
 
        for element_id in array_expr.elements.clone().into_iter() {
 
            self.visit_expr(ctx, element_id)?;
 
        }
 

	
 
        self.progress_array_expr(ctx, id)
 
    }
 

	
 
    fn visit_constant_expr(&mut self, ctx: &mut Ctx, id: ConstantExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.progress_constant_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // TODO: @performance
 
        let call_expr = &ctx.heap[id];
 
        for arg_expr_id in call_expr.arguments.clone() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let var_data = self.var_types.get_mut(var_expr.declaration.as_ref().unwrap()).unwrap();
 
        var_data.used_at.push(upcast_id);
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
macro_rules! debug_assert_expr_ids_unique_and_known {
 
    // Base case for a single expression ID
 
    ($resolver:ident, $id:ident) => {
 
        if cfg!(debug_assertions) {
 
            debug_assert!($resolver.expr_types.contains_key(&$id));
 
        }
 
    };
 
    // Base case for two expression IDs
 
    ($resolver:ident, $id1:ident, $id2:ident) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2);
 
    };
 
    // Generic case
 
    ($resolver:ident, $id1:ident, $id2:ident, $($tail:ident),+) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2, $($tail),+);
 
    };
 
}
 

	
 
macro_rules! debug_assert_ptrs_distinct {
 
    // Base case
 
    ($ptr1:ident, $ptr2:ident) => {
 
        debug_assert!(!std::ptr::eq($ptr1, $ptr2));
 
    };
 
    // Generic case
 
    ($ptr1:ident, $ptr2:ident, $($tail:ident),+) => {
 
        debug_assert_ptrs_distinct!($ptr1, $ptr2);
 
        debug_assert_ptrs_distinct!($ptr2, $($tail),+);
 
    };
 
}
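// For illustration, the macros above expand to chains of debug assertions,
// e.g.
//
//     debug_assert_ptrs_distinct!(type_a, type_b, type_c);
//     // expands (in debug builds) to pointer-inequality checks on the pairs
//     // (type_a, type_b) and (type_b, type_c)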
 

	
 
impl TypeResolvingVisitor {
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError2> {
 
        // Keep inferring until we can no longer make any progress
 
        while let Some(next_expr_id) = self.expr_queued.iter().next() {
 
            let next_expr_id = *next_expr_id;
 
            self.expr_queued.remove(&next_expr_id);
 
            self.progress_expr(ctx, next_expr_id)?;
 
        }
 

	
 
        // Should have inferred everything. Check for this and optionally
 
        // auto-infer the remaining types
 
        for (expr_id, expr_type) in self.expr_types.iter_mut() {
 
            if !expr_type.is_done {
 
                // Auto-infer numberlike/integerlike types to a regular int
 
                if expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    expr_type.parts[0] = InferenceTypePart::Int;
 
                } else {
 
                    let expr = &ctx.heap[*expr_id];
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, expr.position(),
 
                        &format!(
 
                            "Could not fully infer the type of this expression (got '{}')",
 
                            expr_type.display_name(&ctx.heap)
 
                        )
 
                    ))
 
                }
 
            }
 

	
 
            let concrete_type = ctx.heap[*expr_id].get_type_mut();
 
            expr_type.write_concrete_type(concrete_type);
 
        }
 

	
 
        // Check all things we need to monomorphize
 
        // TODO: Struct/enum/union monomorphization
 
        for (call_expr_id, extra_data) in self.extra_data.iter() {
 
            if extra_data.poly_vars.is_empty() { continue; }
 

	
 
            // We have a polymorph
 
            // Retrieve polymorph variable specification
 
            let mut monomorph_types = Vec::with_capacity(extra_data.poly_vars.len());
 
            for (poly_idx, poly_type) in extra_data.poly_vars.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    // TODO: Single clean function for function signatures and polyvars.
 
                    // TODO: Better error message
 
                    let expr = &ctx.heap[*call_expr_id];
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, expr.position(),
 
                        &format!(
 
                            "Could not fully infer the type of polymorphic variable {} of this expression (got '{}')",
 
                            poly_idx, poly_type.display_name(&ctx.heap)
 
                        )
 
                    ))
 
                }
 

	
 
                let mut concrete_type = ConcreteType::default();
 
                poly_type.write_concrete_type(&mut concrete_type);
 
                monomorph_types.insert(poly_idx, concrete_type);
 
            }
 

	
 
            // Resolve to call expression's definition
 
            let call_expr = if let Expression::Call(call_expr) = &ctx.heap[*call_expr_id] {
 
                call_expr
 
            } else {
 
                todo!("implement different kinds of polymorph expressions");
 
            };
 

	
 
            // Add to type table if not yet typechecked
 
            if let Method::Symbolic(symbolic) = &call_expr.method {
 
                let definition_id = symbolic.definition.unwrap();
 
                if !ctx.types.has_monomorph(&definition_id, &monomorph_types) {
 
                    let root_id = ctx.types
 
                        .get_base_definition(&definition_id)
 
                        .unwrap()
 
                        .ast_root;
 

	
 
                    // Pre-emptively add the monomorph to the type table, but
 
                    // we still need to perform typechecking on it
 
                    ctx.types.add_monomorph(&definition_id, monomorph_types.clone());
 
                    queue.push(ResolveQueueElement {
 
                        root_id,
 
                        definition_id,
 
                        monomorph_types,
 
                    })
 
                }
 
            }
 
        }
 

	
 
        // Finally, if the currently resolved definition is a monomorph, then we
 
        // add it to the type table
 
        if !self.poly_vars.is_empty() {
 
            let definition_id = match &self.definition_type {
 
                DefinitionType::None => unreachable!(),
 
                DefinitionType::Function(id) => id.upcast(),
 
                DefinitionType::Component(id) => id.upcast(),
 
            };
 
            ctx.types.instantiate_monomorph(&definition_id, &self.poly_vars)
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, id: ExpressionId) -> Result<(), ParseError2> {
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Array(expr) => {
 
                let id = expr.this;
 
                self.progress_array_expr(ctx, id)
 
            },
 
            Expression::Constant(expr) => {
 
                let id = expr.this;
 
                self.progress_constant_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError2> {
 
        use AssignmentOperator as AO;
 

	
 
        // TODO: Assignable check
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let progress_base = match expr.operation {
 
            AO::Set =>
 
                false,
 
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
 
                self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?,
 
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
 
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
 
                self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?,
 
        };
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_base || progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        if progress_base || progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError2> {
 
        // Note: test expression type is already enforced
 
@@ -1603,1077 +1658,1154 @@ impl TypeResolvingVisitor {
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type [{}]: {}", progress_idx_base || progress_from, self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type [{}]: {}", progress_idx_base || progress_to, self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_idx_base || progress_from { self.queue_expr(from_id); }
 
        if progress_idx_base || progress_to { self.queue_expr(to_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 

	
 
        debug_log!("Select expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_subject, progress_expr) = match &expr.field {
 
            Field::Length => {
 
                let progress_subject = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                (progress_subject, progress_expr)
 
            },
 
            Field::Symbolic(_field) => {
 
                todo!("implement select expr for symbolic fields");
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_subject { self.queue_expr(subject_id); }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_array_expr(&mut self, ctx: &mut Ctx, id: ArrayExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let expr_elements = expr.elements.clone(); // TODO: @performance
 

	
 
        debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // All elements should have an equal type
 
        let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
 
        let mut any_progress = false;
 
        for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
 
            if *progress_arg {
 
                any_progress = true;
 
                self.queue_expr(*arg_id);
 
            }
 
        }
 

	
 
        // And the output should be an array of the element types
 
        let mut expr_progress = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
        if !expr_elements.is_empty() {
 
            let first_arg_id = expr_elements[0];
 
            let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
 
                ctx, upcast_id, upcast_id, 1, first_arg_id, 0
 
            )?;
 

	
 
            expr_progress = expr_progress || inner_expr_progress;
 

	
 
            // Note that if the array type progressed the type of the arguments,
 
            // then we should enqueue this progression function again
 
            // TODO: @fix Make apply_equal_n accept a start idx as well
 
            if arg_progress { self.queue_expr(upcast_id); }
 
        }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type [{}]: {}", expr_progress, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if expr_progress { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_constant_expr(&mut self, ctx: &mut Ctx, id: ConstantExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let template = match &expr.value {
 
            Constant::Null => &MESSAGE_TEMPLATE[..],
 
            Constant::Integer(_) => &INTEGERLIKE_TEMPLATE[..],
 
            Constant::True | Constant::False => &BOOL_TEMPLATE[..],
 
            Constant::Character(_) => todo!("character literals")
 
        };
 

	
 
        let progress = self.apply_forced_constraint(ctx, upcast_id, template)?;
 
        if progress { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    // TODO: @cleanup, see how this can be cleaned up once I implement
 
    //  polymorphic struct/enum/union literals. These likely follow the same
 
    //  pattern as here.
 
    fn progress_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 

	
 
        debug_log!("Call expr '{}': {}", match &expr.method {
 
            Method::Create => String::from("create"),
 
            Method::Fires => String::from("fires"),
 
            Method::Get => String::from("get"),
 
            Method::Put => String::from("put"),
 
            Method::Symbolic(method) => String::from_utf8_lossy(&method.identifier.value).to_string()
 
        },upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 
        debug_log!(" * During (inferring types from arguments and return type):");
 

	
 
        // Check if we can make progress using the arguments and/or return types
 
        // while keeping track of the polyvars we've extended
 
        let mut poly_progress = HashSet::new();
 
        debug_assert_eq!(extra.embedded.len(), expr.arguments.len());
 
        let mut poly_infer_error = false;
 

	
 
        for (arg_idx, arg_id) in expr.arguments.clone().into_iter().enumerate() {
 
            let signature_type = &mut extra.embedded[arg_idx];
 
            let argument_type: *mut _ = self.expr_types.get_mut(&arg_id).unwrap();
 
            let (progress_sig, progress_arg) = Self::apply_equal2_signature_constraint(
 
                ctx, upcast_id, Some(arg_id), signature_type, 0, argument_type, 0
 
            )?;
 

	
 
            debug_log!("   - Arg {} type | sig: {}, arg: {}", arg_idx, signature_type.display_name(&ctx.heap), unsafe{&*argument_type}.display_name(&ctx.heap));
 

	
 
            if progress_sig {
 
                // Progressed signature, so also apply inference to the 
 
                // polymorph types using the markers 
 
                debug_assert!(signature_type.has_marker, "progress on signature argument type without markers");
 
                for (poly_idx, poly_section) in signature_type.marker_iter() {
 
                debug_assert!(signature_type.has_body_marker, "progress on signature argument type without markers");
 
                for (poly_idx, poly_section) in signature_type.body_marker_iter() {
 
                    let polymorph_type = &mut extra.poly_vars[poly_idx];
 
                    match Self::apply_forced_constraint_types(
 
                        polymorph_type, 0, poly_section, 0
 
                    ) {
 
                        Ok(true) => { poly_progress.insert(poly_idx); },
 
                        Ok(false) => {},
 
                        Err(()) => { poly_infer_error = true; }
 
                    }
 

	
 
                    debug_log!("   - Poly {} type | sig: {}, arg: {}", poly_idx, polymorph_type.display_name(&ctx.heap), InferenceType::partial_display_name(&ctx.heap, poly_section));
 
                }
 
            }
 
            if progress_arg {
 
                // Progressed argument expression
 
                self.expr_queued.insert(arg_id);
 
            }
 
        }
 

	
 
        // Do the same for the return type
 
        let signature_type = &mut extra.returned;
 
        let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
        let (progress_sig, progress_expr) = Self::apply_equal2_constraint_types(
 
            ctx, upcast_id, signature_type, 0, expr_type, 0
 
        let (progress_sig, progress_expr) = Self::apply_equal2_signature_constraint(
 
            ctx, upcast_id, None, signature_type, 0, expr_type, 0
 
        )?;
 

	
 
        debug_log!("   - Ret type | sig: {}, arg: {}", signature_type.display_name(&ctx.heap), unsafe{&*expr_type}.display_name(&ctx.heap));
 

	
 
        if progress_sig {
 
            // As above: apply inference to polyargs as well
 
            debug_assert!(signature_type.has_marker, "progress on signature return type without markers");
 
            for (poly_idx, poly_section) in signature_type.marker_iter() {
 
            debug_assert!(signature_type.has_body_marker, "progress on signature return type without markers");
 
            for (poly_idx, poly_section) in signature_type.body_marker_iter() {
 
                let polymorph_type = &mut extra.poly_vars[poly_idx];
 
                match Self::apply_forced_constraint_types(
 
                    polymorph_type, 0, poly_section, 0
 
                ) {
 
                    Ok(true) => { poly_progress.insert(poly_idx); },
 
                    Ok(false) => {},
 
                    Err(()) => { poly_infer_error = true; }
 
                }
 
                debug_log!("   - Poly {} type | sig: {}, arg: {}", poly_idx, polymorph_type.display_name(&ctx.heap), InferenceType::partial_display_name(&ctx.heap, poly_section));
 
            }
 
        }
 
        if progress_expr {
 
            if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                self.expr_queued.insert(parent_id);
 
            }
 
        }
 

	
 
        // If we had an error in the polymorphic variable's inference, then we
 
        // need to provide a human readable error: find a pair of inference
 
        // types in the arguments/return type that do not agree on the
 
        // polymorphic variable's type
 
        if poly_infer_error { return Err(self.construct_poly_arg_error(ctx, id)) }
 

	
 
        // If we did not have an error in the polymorph inference above, then
 
        // reapplying the polymorph type to each argument type and the return
 
        // type should always succeed.
 
        debug_log!(" * During (reinferring from progress polyvars):");
 
        // TODO: @performance If the algorithm is changed to be more "on demand
 
        //  argument re-evaluation", instead of "all-argument re-evaluation",
 
        //  then this is no longer true
 
        for poly_idx in poly_progress.into_iter() {
 
            // For each polymorphic argument: first extend the signature type,
 
            // then reapply the equal2 constraint to the expressions
 
            let poly_type = &extra.poly_vars[poly_idx];
 
            for (arg_idx, sig_type) in extra.embedded.iter_mut().enumerate() {
 
                let mut seek_idx = 0;
 
                let mut modified_sig = false;
 
                while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_marker(poly_idx, seek_idx) {
 
                while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_part(
 
                    InferenceTypePart::MarkerBody(poly_idx), seek_idx
 
                ) {
 
                    let modified_at_marker = Self::apply_forced_constraint_types(
 
                        sig_type, start_idx, &poly_type.parts, 0
 
                    ).unwrap();
 
                    modified_sig = modified_sig || modified_at_marker;
 
                    seek_idx = end_idx;
 
                }
 

	
 
                if !modified_sig {
 
                    debug_log!("   - Poly {} | Arg {} type | signature has not changed", poly_idx, arg_idx);
 
                    continue;
 
                }
 

	
 
                // Part of signature was modified, so update expression used as
 
                // argument as well
 
                let arg_expr_id = expr.arguments[arg_idx];
 
                let arg_type: *mut _ = self.expr_types.get_mut(&arg_expr_id).unwrap();
 
                let (progress_arg, _) = Self::apply_equal2_constraint_types(
 
                    ctx, arg_expr_id, arg_type, 0, sig_type, 0
 
                let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                    ctx, arg_expr_id, Some(arg_expr_id), sig_type, 0, arg_type, 0
 
                ).expect("no inference error at argument type");
 
                if progress_arg { self.expr_queued.insert(arg_expr_id); }
 
                debug_log!("   - Poly {} | Arg {} type | sig: {}, arg: {}", poly_idx, arg_idx, sig_type.display_name(&ctx.heap), unsafe{&*arg_type}.display_name(&ctx.heap));
 
            }
 

	
 
            // Again: do the same for the return type
 
            let sig_type = &mut extra.returned;
 
            let mut seek_idx = 0;
 
            let mut modified_sig = false;
 
            while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_marker(poly_idx, seek_idx) {
 
            while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_part(
 
                InferenceTypePart::MarkerBody(poly_idx), seek_idx
 
            ) {
 
                let modified_at_marker = Self::apply_forced_constraint_types(
 
                    sig_type, start_idx, &poly_type.parts, 0
 
                ).unwrap();
 
                modified_sig = modified_sig || modified_at_marker;
 
                seek_idx = end_idx;
 
            }
 

	
 
            if modified_sig {
 
                let ret_type = self.expr_types.get_mut(&upcast_id).unwrap();
 
                let (progress_ret, _) = Self::apply_equal2_constraint_types(
 
                    ctx, upcast_id, ret_type, 0, sig_type, 0
 
                let (_, progress_ret) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, sig_type, 0, ret_type, 0
 
                ).expect("no inference error at return type");
 
                if progress_ret {
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 
                debug_log!("   - Poly {} | Ret type | sig: {}, arg: {}", poly_idx, sig_type.display_name(&ctx.heap), ret_type.display_name(&ctx.heap));
 
            } else {
 
                debug_log!("   - Poly {} | Ret type | signature has not changed", poly_idx);
 
            }
 
        }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let var_expr = &ctx.heap[id];
 
        let var_id = var_expr.declaration.unwrap();
 

	
 
        debug_log!("Variable expr '{}': {}", &String::from_utf8_lossy(&ctx.heap[var_id].identifier().value), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Var  type: {}", self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Retrieve shared variable type and expression type and apply inference
 
        let var_data = self.var_types.get_mut(&var_id).unwrap();
 
        let expr_type = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            &mut var_data.var_type as *mut _, 0, expr_type, 0
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            let var_decl = &ctx.heap[var_id];
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, var_decl.position(),
 
                &format!(
 
                    "Conflicting types for this variable, previously assigned the type '{}'",
 
                    var_data.var_type.display_name(&ctx.heap)
 
                )
 
            ).with_postfixed_info(
 
                &ctx.module.source, var_expr.position,
 
                &format!(
 
                    "But inferred to have incompatible type '{}' here",
 
                    expr_type.display_name(&ctx.heap)
 
                )
 
            ))
 
        }
 

	
 
        let progress_var = infer_res.modified_lhs();
 
        let progress_expr = infer_res.modified_rhs();
 

	
 
        if progress_var {
 
            // Let other variable expressions using this type progress as well
 
            for other_expr in var_data.used_at.iter() {
 
                if *other_expr != upcast_id {
 
                    self.expr_queued.insert(*other_expr);
 
                }
 
            }
 

	
 
            // Let a linked port know that our type has updated
 
            if let Some(linked_id) = var_data.linked_var {
 
                // Only perform one-way inference to prevent updating our type, this
 
                // would lead to an inconsistency
 
                let var_type: *mut _ = &mut var_data.var_type;
 
                let mut link_data = self.var_types.get_mut(&linked_id).unwrap();
 

	
 
                debug_assert!(
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Input ||
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Output
 
                );
 
                debug_assert!(
 
                    link_data.var_type.parts[0] == InferenceTypePart::Input ||
 
                    link_data.var_type.parts[0] == InferenceTypePart::Output
 
                );
 
                match InferenceType::infer_subtree_for_single_type(&mut link_data.var_type, 1, &unsafe{&*var_type}.parts, 1) {
 
                    SingleInferenceResult::Modified => {
 
                        for other_expr in &link_data.used_at {
 
                            self.expr_queued.insert(*other_expr);
 
                        }
 
                    },
 
                    SingleInferenceResult::Unmodified => {},
 
                    SingleInferenceResult::Incompatible => {
 
                        let var_data = self.var_types.get(&var_id).unwrap();
 
                        let link_data = self.var_types.get(&linked_id).unwrap();
 
                        let var_decl = &ctx.heap[var_id];
 
                        let link_decl = &ctx.heap[linked_id];
 

	
 
                        return Err(ParseError2::new_error(
 
                            &ctx.module.source, var_decl.position(),
 
                            &format!(
 
                                "Conflicting types for this variable, assigned the type '{}'",
 
                                var_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ).with_postfixed_info(
 
                            &ctx.module.source, link_decl.position(),
 
                            &format!(
 
                                "Because it is incompatible with this variable, assigned the type '{}'",
 
                                link_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ));
 
                    }
 
                }
 
            }
 
        }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Var  type [{}]: {}", progress_var, self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        Ok(())
 
    }
 

	
 
    fn queue_expr_parent(&mut self, ctx: &Ctx, expr_id: ExpressionId) {
 
        if let ExpressionParent::Expression(parent_expr_id, _) = &ctx.heap[expr_id].parent() {
 
            self.expr_queued.insert(*parent_expr_id);
 
        }
 
    }
 

	
 
    fn queue_expr(&mut self, expr_id: ExpressionId) {
 
        self.expr_queued.insert(expr_id);
 
    }
 

	
 
    /// Applies a forced type constraint: the type associated with the supplied
 
    /// expression will be molded into the provided "template". The template may
 
    /// be fully specified (e.g. a bool) or contain "inference" variables (e.g.
 
    /// an array of T)
 
    fn apply_forced_constraint(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> Result<bool, ParseError2> {
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id);
 
        let expr_type = self.expr_types.get_mut(&expr_id).unwrap();
 
        match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(
 
                self.construct_template_type_error(ctx, expr_id, template)
 
            )
 
        }
 
    }
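    // A minimal sketch of how a template molds an expression type, using the
    // same `InferenceType` API exercised by the tests at the bottom of this
    // file (ITP = InferenceTypePart):
    //
    //     let mut expr_type = InferenceType::new(false, false, vec![ITP::Unknown]);
    //     let template = [ITP::Array, ITP::NumberLike];
    //     let res = InferenceType::infer_subtree_for_single_type(&mut expr_type, 0, &template, 0);
    //     assert_eq!(SingleInferenceResult::Modified, res);
    //     // expr_type now reads as "array of number-like": still partially
    //     // unknown, so a later constraint may refine it further.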
 

	
 
    fn apply_forced_constraint_types(
 
        to_infer: *mut InferenceType, to_infer_start_idx: usize,
 
        template: &[InferenceTypePart], template_start_idx: usize
 
    ) -> Result<bool, ()> {
 
        match InferenceType::infer_subtree_for_single_type(
 
            unsafe{ &mut *to_infer }, to_infer_start_idx,
 
            template, template_start_idx
 
        ) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(()),
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects the two provided types to be
 
    /// equal. We attempt to make progress in inferring the types. If the call
 
    /// is successful then the composition of all types is made equal.
 
    /// The "parent" `expr_id` is provided to construct errors.
 
    fn apply_equal2_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg1_start_idx: usize,
 
        arg2_id: ExpressionId, arg2_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError2> {
 
        debug_assert_expr_ids_unique_and_known!(self, arg1_id, arg2_id);
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            arg1_type, arg1_start_idx,
 
            arg2_type, arg2_start_idx
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
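    // Sketch of the equal2 constraint on two hypothetical expression types
    // that each hold part of the answer (ITP = InferenceTypePart):
    //
    //     let mut lhs = InferenceType::new(false, false, vec![ITP::Array, ITP::Unknown]);
    //     let mut rhs = InferenceType::new(false, false, vec![ITP::ArrayLike, ITP::Byte]);
    //     let res = unsafe {
    //         InferenceType::infer_subtrees_for_both_types(&mut lhs, 0, &mut rhs, 0)
    //     };
    //     // res.modified_lhs() / res.modified_rhs() report which side was
    //     // refined; afterwards both sides should read as "array of byte".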
 

	
 
    fn apply_equal2_constraint_types(
 
        ctx: &Ctx, expr_id: ExpressionId,
 
        type1: *mut InferenceType, type1_start_idx: usize, 
 
        type2: *mut InferenceType, type2_start_idx: usize
 
    fn apply_equal2_signature_constraint(
 
        ctx: &Ctx, outer_expr_id: ExpressionId, expr_id: Option<ExpressionId>,
 
        signature_type: *mut InferenceType, signature_start_idx: usize,
 
        expression_type: *mut InferenceType, expression_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError2> {
 
        debug_assert_ptrs_distinct!(type1, type2);
 
        debug_assert_ptrs_distinct!(signature_type, expression_type);
 
        let infer_res = unsafe { 
 
            InferenceType::infer_subtrees_for_both_types(
 
                type1, type1_start_idx, 
 
                type2, type2_start_idx
 
                signature_type, signature_start_idx,
 
                expression_type, expression_start_idx
 
            ) 
 
        };
 

	
 
        if infer_res == DualInferenceResult::Incompatible {
 
            // TODO: Check if I still need to use this
 
            let outer_position = ctx.heap[outer_expr_id].position();
 
            let (position_name, position) = match expr_id {
 
                Some(expr_id) => ("argument's", ctx.heap[expr_id].position()),
 
                None => ("return type's", outer_position)
 
            };
 
            let (signature_display_type, expression_display_type) = unsafe { (
 
                (&*signature_type).display_name(&ctx.heap),
 
                (&*expression_type).display_name(&ctx.heap)
 
            ) };
 

	
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, ctx.heap[expr_id].position(),
 
                "TODO: Write me, apply_equal2_constraint_types"
 
                &ctx.module.source, outer_position,
 
                "Failed to fully resolve the types of this expression"
 
            ).with_postfixed_info(
 
                &ctx.module.source, position,
 
                &format!(
 
                    "Because the {} signature has been resolved to '{}', but the expression has been resolved to '{}'",
 
                    position_name, signature_display_type, expression_display_type
 
                )
 
            ));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
 

	
 
    /// Applies a type constraint that expects all three provided types to be
 
    /// equal. In case we can make progress in inferring the types then we
 
    /// attempt to do so. If the call is successful then the composition of all
 
    /// types is made equal.
 
    fn apply_equal3_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId,
 
        start_idx: usize
 
    ) -> Result<(bool, bool, bool), ParseError2> {
 
        // Safety: all expression IDs are always distinct, and we do not modify
 
        //  the container
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id, arg1_id, arg2_id);
 
        let expr_type: *mut _ = self.expr_types.get_mut(&expr_id).unwrap();
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let expr_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
 
        };
 
        if expr_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
 
        }
 

	
 
        let args_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) };
 
        if args_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        // If all types are compatible, but the second call caused the arg1_type
 
        // to be expanded, then we must also assign this to expr_type.
 
        let mut progress_expr = expr_res.modified_lhs();
 
        let mut progress_arg1 = expr_res.modified_rhs();
 
        let mut progress_arg2 = args_res.modified_rhs();
 
        let progress_arg2 = args_res.modified_rhs();
 

	
 
        if args_res.modified_lhs() { 
 
            unsafe {
 
                let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
 
                let subtree = &((*arg2_type).parts[start_idx..end_idx]);
 
                (*expr_type).replace_subtree(start_idx, subtree);
 
            }
 
            progress_expr = true;
 
            progress_arg1 = true;
 
        }
 

	
 
        Ok((progress_expr, progress_arg1, progress_arg2))
 
    }
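    // Sketch of the equal3 case (e.g. a conditional expression and its two
    // branches), with hypothetical part values:
    //
    //     expr: [Unknown]      arg1: [IntegerLike]      arg2: [Byte]
    //     expr <-> arg1        => expr = arg1 = [IntegerLike]
    //     arg1 <-> arg2        => arg1 = arg2 = [Byte]
    //     arg1 was expanded by the second call, so its subtree is copied back
    //     into expr as well, leaving all three types at [Byte].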
 

	
 
    // TODO: @optimize Since we only deal with a single type this might be done
 
    //  a lot more efficiently, methinks (disregarding the allocations here)
 
    fn apply_equal_n_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
 
    ) -> Result<Vec<bool>, ParseError2> {
 
        // Early exit
 
        match args.len() {
 
            0 => return Ok(vec!()),         // nothing to progress
 
            1 => return Ok(vec![false]),    // only one type, so nothing to infer
 
            _ => {}
 
        }
 

	
 
        let mut progress = Vec::new();
 
        progress.resize(args.len(), false);
 

	
 
        // Do pairwise inference, keep track of the last entry we made progress
 
        // on. Once done we need to update everything to the most-inferred type.
 
        let mut arg_iter = args.iter();
 
        let mut last_arg_id = *arg_iter.next().unwrap();
 
        let mut last_lhs_progressed = 0;
 
        let mut lhs_arg_idx = 0;
 

	
 
        while let Some(next_arg_id) = arg_iter.next() {
 
            let arg1_type: *mut _ = self.expr_types.get_mut(&last_arg_id).unwrap();
 
            let arg2_type: *mut _ = self.expr_types.get_mut(next_arg_id).unwrap();
 

	
 
            let res = unsafe {
 
                InferenceType::infer_subtrees_for_both_types(arg1_type, 0, arg2_type, 0)
 
            };
 

	
 
            if res == DualInferenceResult::Incompatible {
 
                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
 
            }
 

	
 
            if res.modified_lhs() {
 
                // We re-inferred something on the left hand side, so everything
 
                // up until now should be re-inferred.
 
                progress[lhs_arg_idx] = true;
 
                last_lhs_progressed = lhs_arg_idx;
 
            }
 
            progress[lhs_arg_idx + 1] = res.modified_rhs();
 

	
 
            last_arg_id = *next_arg_id;
 
            lhs_arg_idx += 1;
 
        }
 

	
 
        // Re-infer everything. Note that we do not need to re-infer the type
 
        // exactly at `last_lhs_progressed`, but only everything up to it.
 
        let last_type: *mut _ = self.expr_types.get_mut(args.last().unwrap()).unwrap();
 
        for arg_idx in 0..last_lhs_progressed {
 
            let arg_type: *mut _ = self.expr_types.get_mut(&args[arg_idx]).unwrap();
 
            unsafe{
 
                (*arg_type).replace_subtree(0, &(*last_type).parts);
 
            }
 
            progress[arg_idx] = true;
 
        }
 

	
 
        Ok(progress)
 
    }
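    // Sketch of the equal-n pass over, say, the elements of an array literal
    // (hypothetical part values):
    //
    //     args before:   [Unknown]    [IntegerLike]    [Byte]
    //     pairwise:      0 <-> 1 => both [IntegerLike], 1 <-> 2 => both [Byte]
    //     The left-hand side of the last pair still progressed, so every
    //     argument before it is overwritten with the final subtree, leaving
    //     all three arguments at [Byte].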
 

	
 
    /// Determines the `InferenceType` for the expression based on the
 
    /// expression parent. Note that if the parent is another expression, we do
 
    /// not take special action; instead we let parent expressions fix the type
 
    /// of subexpressions before they have a chance to call this function.
 
    /// Hence: if the expression type is already set, this function doesn't do
 
    /// anything.
 
    fn insert_initial_expr_inference_type(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId
 
    ) -> Result<(), ParseError2> {
 
        use ExpressionParent as EP;
 
        use InferenceTypePart as ITP;
 

	
 
        let expr = &ctx.heap[expr_id];
 
        let inference_type = match expr.parent() {
 
            EP::None =>
 
                // Should have been set by linker
 
                unreachable!(),
 
            EP::ExpressionStmt(_) | EP::Expression(_, _) =>
 
                // Determined during type inference
 
                InferenceType::new(false, false, vec![ITP::Unknown]),
 
            EP::If(_) | EP::While(_) | EP::Assert(_) =>
 
                // Must be a boolean
 
                InferenceType::new(false, true, vec![ITP::Bool]),
 
            EP::Return(_) =>
 
                // Must match the return type of the function
 
                if let DefinitionType::Function(func_id) = self.definition_type {
 
                    let return_parser_type_id = ctx.heap[func_id].return_type;
 
                    self.determine_inference_type_from_parser_type(ctx, return_parser_type_id, true)
 
                } else {
 
                    // Cannot happen: definition always set upon body traversal
 
                    // and "return" calls in components are illegal.
 
                    unreachable!();
 
                },
 
            EP::New(_) =>
 
                // Must be a component call, which we assign a "Void" return
 
                // type
 
                InferenceType::new(false, true, vec![ITP::Void]),
 
        };
 

	
 
        match self.expr_types.entry(expr_id) {
 
            Entry::Vacant(vacant) => {
 
                vacant.insert(inference_type);
 
            },
 
            Entry::Occupied(mut preexisting) => {
 
                // We already have an entry, this happens if our parent fixed
 
                // our type (e.g. we're used in a conditional expression's test)
 
                // but we have a different type.
 
                // TODO: Is this ever called? Seems like it can't be.
 
                debug_assert!(false, "I am actually called, my ID is {}", expr_id.index);
 
                let old_type = preexisting.get_mut();
 
                if let SingleInferenceResult::Incompatible = InferenceType::infer_subtree_for_single_type(
 
                    old_type, 0, &inference_type.parts, 0
 
                ) {
 
                    return Err(self.construct_expr_type_error(ctx, expr_id, expr_id))
 
                }
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn insert_initial_call_polymorph_data(
 
        &mut self, ctx: &mut Ctx, call_id: CallExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 

	
 
        // Note: the polymorph variables may be partially specified and may
 
        // contain references to the wrapping definition's (i.e. the proctype
 
        // we are currently visiting) polymorphic arguments.
 
        //
 
        // The arguments of the call may refer to polymorphic variables in the
 
        // definition of the function we're calling, not of the wrapping
 
        // definition. We insert markers in these inferred types to be able to
 
        // map them back and forth to the polymorphic arguments of the function
 
        // we are calling.
 
        let call = &ctx.heap[call_id];
 

	
 
        // Handle the polymorphic variables themselves
 
        let mut poly_vars = Vec::with_capacity(call.poly_args.len());
 
        for poly_arg_type_id in call.poly_args.clone() { // TODO: @performance
 
            poly_vars.push(self.determine_inference_type_from_parser_type(ctx, poly_arg_type_id, true));
 
        }
 

	
 
        // Handle the arguments
 
        // TODO: @cleanup: Maybe factor this out for reuse in the validator/linker, should also
 
        //  make the code slightly more robust.
 
        let (embedded_types, return_type) = match &call.method {
 
            Method::Create => {
 
                // Not polymorphic
 
                unreachable!("insert initial polymorph data for builtin 'create()' call")
 
                (
 
                    vec![InferenceType::new(false, true, vec![ITP::Int])],
 
                    InferenceType::new(false, true, vec![ITP::Message, ITP::Byte])
 
                )
 
            },
 
            Method::Fires => {
 
                // bool fires<T>(PortLike<T> arg)
 
                (
 
                    vec![InferenceType::new(true, false, vec![ITP::PortLike, ITP::Marker(0), ITP::Unknown])],
 
                    vec![InferenceType::new(true, false, vec![ITP::PortLike, ITP::MarkerBody(0), ITP::Unknown])],
 
                    InferenceType::new(false, true, vec![ITP::Bool])
 
                )
 
            },
 
            Method::Get => {
 
                // T get<T>(input<T> arg)
 
                (
 
                    vec![InferenceType::new(true, false, vec![ITP::Input, ITP::Marker(0), ITP::Unknown])],
 
                    InferenceType::new(true, false, vec![ITP::Marker(0), ITP::Unknown])
 
                    vec![InferenceType::new(true, false, vec![ITP::Input, ITP::MarkerBody(0), ITP::Unknown])],
 
                    InferenceType::new(true, false, vec![ITP::MarkerBody(0), ITP::Unknown])
 
                )
 
            },
 
            Method::Put => {
 
                // void Put<T>(output<T> port, T msg)
 
                (
 
                    vec![
 
                        InferenceType::new(true, false, vec![ITP::Output, ITP::Marker(0), ITP::Unknown]),
 
                        InferenceType::new(true, false, vec![ITP::Marker(0), ITP::Unknown])
 
                        InferenceType::new(true, false, vec![ITP::Output, ITP::MarkerBody(0), ITP::Unknown]),
 
                        InferenceType::new(true, false, vec![ITP::MarkerBody(0), ITP::Unknown])
 
                    ],
 
                    InferenceType::new(false, true, vec![ITP::Void])
 
                )
 
            }
 
            Method::Symbolic(symbolic) => {
 
                let definition = &ctx.heap[symbolic.definition.unwrap()];
 

	
 
                match definition {
 
                    Definition::Component(definition) => {
 
                        let mut parameter_types = Vec::with_capacity(definition.parameters.len());
 
                        for param_id in definition.parameters.clone() {
 
                            let param = &ctx.heap[param_id];
 
                            let param_parser_type_id = param.parser_type;
 
                            parameter_types.push(self.determine_inference_type_from_parser_type(ctx, param_parser_type_id, false));
 
                        }
 

	
 
                        (parameter_types, InferenceType::new(false, true, vec![InferenceTypePart::Void]))
 
                    },
 
                    Definition::Function(definition) => {
 
                        let mut parameter_types = Vec::with_capacity(definition.parameters.len());
 
                        for param_id in definition.parameters.clone() {
 
                            let param = &ctx.heap[param_id];
 
                            let param_parser_type_id = param.parser_type;
 
                            parameter_types.push(self.determine_inference_type_from_parser_type(ctx, param_parser_type_id, false));
 
                        }
 

	
 
                        let return_type = self.determine_inference_type_from_parser_type(ctx, definition.return_type, false);
 
                        (parameter_types, return_type)
 
                    },
 
                    Definition::Struct(_) | Definition::Enum(_) => {
 
                        unreachable!("insert initial polymorph data for struct/enum");
 
                    }
 
                }
 
            }
 
        };
 

	
 
        self.extra_data.insert(call_id.upcast(), ExtraData {
 
            poly_vars,
 
            embedded: embedded_types,
 
            returned: return_type
 
        });
 
    }
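    // For a hypothetical user-defined function `T first<T>(T[] values)` the
    // stored ExtraData would, following the same scheme as the builtins above,
    // roughly look like:
    //
    //     poly_vars: [ [Unknown] ]                         // T, not yet known
    //     embedded:  [ [Array, MarkerBody(0), Unknown] ]   // the `values` parameter
    //     returned:  [ MarkerBody(0), Unknown ]            // the return type T
    //
    // so that progress on the argument or the return type can be mapped back
    // onto the polymorphic variable T.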
 

	
 
    /// Determines the initial InferenceType from the provided ParserType. This
 
    /// may be called with two kinds of intentions:
 
    /// 1. To resolve a ParserType within the body of a function, or on
 
    ///     polymorphic arguments to calls/instantiations within that body. This
 
    ///     means that the polymorphic variables are known and can be replaced
 
    ///     with the monomorph we're instantiating.
 
    /// 2. To resolve a ParserType on a called function's definition or on
 
    ///     an instantiated datatype's members. This means that the polymorphic
 
    ///     arguments inside those ParserTypes refer to the polymorphic
 
    ///     variables in the called/instantiated type's definition.
 
    /// In the second case we place InferenceTypePart::MarkerBody instances such
 
    /// that we can perform type inference on the polymorphic variables.
 
    fn determine_inference_type_from_parser_type(
 
        &mut self, ctx: &Ctx, parser_type_id: ParserTypeId,
 
        parser_type_in_body: bool
 
    ) -> InferenceType {
 
        use ParserTypeVariant as PTV;
 
        use InferenceTypePart as ITP;
 

	
 
        let mut to_consider = VecDeque::with_capacity(16);
 
        to_consider.push_back(parser_type_id);
 

	
 
        let mut infer_type = Vec::new();
 
        let mut has_inferred = false;
 
        let mut has_markers = false;
 

	
 
        while !to_consider.is_empty() {
 
            let parser_type_id = to_consider.pop_front().unwrap();
 
            let parser_type = &ctx.heap[parser_type_id];
 
            match &parser_type.variant {
 
                PTV::Message => { infer_type.push(ITP::Message); },
 
                PTV::Message => {
 
                    // TODO: @types Remove the Message -> Byte hack at some point...
 
                    infer_type.push(ITP::Message);
 
                    infer_type.push(ITP::Byte);
 
                },
 
                PTV::Bool => { infer_type.push(ITP::Bool); },
 
                PTV::Byte => { infer_type.push(ITP::Byte); },
 
                PTV::Short => { infer_type.push(ITP::Short); },
 
                PTV::Int => { infer_type.push(ITP::Int); },
 
                PTV::Long => { infer_type.push(ITP::Long); },
 
                PTV::String => { infer_type.push(ITP::String); },
 
                PTV::IntegerLiteral => { unreachable!("integer literal type on variable type"); },
 
                PTV::Inferred => {
 
                    infer_type.push(ITP::Unknown);
 
                    has_inferred = true;
 
                },
 
                PTV::Array(subtype_id) => {
 
                    infer_type.push(ITP::Array);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Input(subtype_id) => {
 
                    infer_type.push(ITP::Input);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Output(subtype_id) => {
 
                    infer_type.push(ITP::Output);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Symbolic(symbolic) => {
 
                    debug_assert!(symbolic.variant.is_some(), "symbolic variant not yet determined");
 
                    match symbolic.variant.as_ref().unwrap() {
 
                        SymbolicParserTypeVariant::PolyArg(_, arg_idx) => {
 
                            // Retrieve concrete type of argument and add it to
 
                            // the inference type.
 
                            let arg_idx = *arg_idx;
 
                            debug_assert!(symbolic.poly_args.is_empty()); // TODO: @hkt
 

	
 
                            if parser_type_in_body {
 
                                // Polymorphic argument refers to definition's
 
                                // polymorphic variables
 
                                debug_assert!(arg_idx < self.poly_vars.len());
 
                                debug_assert!(!self.poly_vars[arg_idx].has_marker());
 
                                infer_type.push(ITP::MarkerDefinition(arg_idx));
 
                                for concrete_part in &self.poly_vars[arg_idx].parts {
 
                                    infer_type.push(ITP::from(*concrete_part));
 
                                }
 
                            } else {
 
                                // Polymorphic argument has to be inferred
 
                                has_markers = true;
 
                                has_inferred = true;
 
                                infer_type.push(ITP::Marker(arg_idx));
 
                                infer_type.push(ITP::MarkerBody(arg_idx));
 
                                infer_type.push(ITP::Unknown);
 
                            }
 
                        },
 
                        SymbolicParserTypeVariant::Definition(definition_id) => {
 
                            // TODO: @cleanup
 
                            if cfg!(debug_assertions) {
 
                                let definition = &ctx.heap[*definition_id];
 
                                debug_assert!(definition.is_struct() || definition.is_enum()); // TODO: @function_ptrs
 
                                let num_poly = match definition {
 
                                    Definition::Struct(v) => v.poly_vars.len(),
 
                                    Definition::Enum(v) => v.poly_vars.len(),
 
                                    _ => unreachable!(),
 
                                };
 
                                debug_assert_eq!(symbolic.poly_args.len(), num_poly);
 
                            }
 

	
 
                            infer_type.push(ITP::Instance(*definition_id, symbolic.poly_args.len()));
 
                            let mut poly_arg_idx = symbolic.poly_args.len();
 
                            while poly_arg_idx > 0 {
 
                                poly_arg_idx -= 1;
 
                                to_consider.push_front(symbolic.poly_args[poly_arg_idx]);
 
                            }
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        InferenceType::new(has_markers, !has_inferred, infer_type)
 
    }
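    // Sketch of the two cases above for a parameter written as `input<T>`,
    // where T is the callee's first polymorphic variable:
    //
    //     case 1 (in body, T already instantiated as `int`):
    //         [Input, MarkerDefinition(0), Int]
    //     case 2 (on the called definition, T still to be inferred):
    //         [Input, MarkerBody(0), Unknown]
    //
    // The markers are what allows call-site inference to map progress on
    // arguments and return values back onto the callee's polymorphic variables.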
 

	
 
    /// Construct an error when an expression's type does not match. This
 
    /// happens if we infer the expression type from its arguments (e.g. the
 
    /// expression type of an addition operator is the type of the arguments),

    /// but the expression type was already set due to our parent (e.g. an
 
    /// "if statement" or a "logical not" always expecting a boolean)
 
    fn construct_expr_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, arg_id: ExpressionId
 
    ) -> ParseError2 {
 
        // TODO: Expand and provide more meaningful information for humans
 
        let expr = &ctx.heap[expr_id];
 
        let arg_expr = &ctx.heap[arg_id];
 
        let expr_type = self.expr_types.get(&expr_id).unwrap();
 
        let arg_type = self.expr_types.get(&arg_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            &format!(
 
                "Incompatible types: this expression expected a '{}'", 
 
                expr_type.display_name(&ctx.heap)
 
            )
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg_expr.position(),
 
            &format!(
 
                "But this expression yields a '{}'",
 
                arg_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_arg_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId
 
    ) -> ParseError2 {
 
        let expr = &ctx.heap[expr_id];
 
        let arg1 = &ctx.heap[arg1_id];
 
        let arg2 = &ctx.heap[arg2_id];
 

	
 
        let arg1_type = self.expr_types.get(&arg1_id).unwrap();
 
        let arg2_type = self.expr_types.get(&arg2_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            "Incompatible types: cannot apply this expression"
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg1.position(),
 
            &format!(
 
                "Because this expression has type '{}'",
 
                arg1_type.display_name(&ctx.heap)
 
            )
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg2.position(),
 
            &format!(
 
                "But this expression has type '{}'",
 
                arg2_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_template_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> ParseError2 {
 
        let expr = &ctx.heap[expr_id];
 
        let expr_type = self.expr_types.get(&expr_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            &format!(
 
                "Incompatible types: got a '{}' but expected a '{}'",
 
                expr_type.display_name(&ctx.heap), 
 
                InferenceType::partial_display_name(&ctx.heap, template)
 
            )
 
        )
 
    }
 

	
 
    /// Constructs a human interpretable error in the case that type inference
 
    /// on a polymorphic variable to a function call failed. This may only be
 
    /// caused by a pair of inference types (which may come from arguments or
 
    /// the return type) having two different inferred values for that
 
    /// polymorphic variable.
 
    ///
 
    /// So we find this pair (which may be an argument type or return type
 
    /// conflicting with itself) and construct the error using it.
 
    fn construct_poly_arg_error(
 
        &self, ctx: &Ctx, call_id: CallExpressionId
 
    ) -> ParseError2 {
 
        // Helper function to check for polymorph mismatch between two inference
 
        // types.
 
        fn has_poly_mismatch<'a>(type_a: &'a InferenceType, type_b: &'a InferenceType) -> Option<(usize, &'a [InferenceTypePart], &'a [InferenceTypePart])> {
 
            if !type_a.has_marker || !type_b.has_marker {
 
            if !type_a.has_body_marker || !type_b.has_body_marker {
 
                return None
 
            }
 

	
 
            for (marker_a, section_a) in type_a.marker_iter() {
 
                for (marker_b, section_b) in type_b.marker_iter() {
 
            for (marker_a, section_a) in type_a.body_marker_iter() {
 
                for (marker_b, section_b) in type_b.body_marker_iter() {
 
                    if marker_a != marker_b {
 
                        // Not the same polymorphic variable
 
                        continue;
 
                    }
 

	
 
                    if !InferenceType::check_subtrees(section_a, 0, section_b, 0) {
 
                        // Not compatible
 
                        return Some((marker_a, section_a, section_b))
 
                    }
 
                }
 
            }
 

	
 
            None
 
        }
 

	
 
        // Helper function to retrieve polyvar name and function name
 
        fn get_poly_var_and_func_name(ctx: &Ctx, poly_var_idx: usize, expr: &CallExpression) -> (String, String) {
 
            match &expr.method {
 
                Method::Create => unreachable!(),
 
                Method::Fires => (String::from('T'), String::from("fires")),
 
                Method::Get => (String::from('T'), String::from("get")),
 
                Method::Put => (String::from('T'), String::from("put")),
 
                Method::Symbolic(symbolic) => {
 
                    let definition = &ctx.heap[symbolic.definition.unwrap()];
 
                    let poly_var = match definition {
 
                        Definition::Struct(_) | Definition::Enum(_) => unreachable!(),
 
                        Definition::Function(definition) => {
 
                            String::from_utf8_lossy(&definition.poly_vars[poly_var_idx].value).to_string()
 
                        },
 
                        Definition::Component(definition) => {
 
                            String::from_utf8_lossy(&definition.poly_vars[poly_var_idx].value).to_string()
 
                        }
 
                    };
 
                    let func_name = String::from_utf8_lossy(&symbolic.identifier.value).to_string();
 
                    (poly_var, func_name)
 
                }
 
            }
 
        }
 

	
 
        // Helper function to construct initial error
 
        fn construct_main_error(ctx: &Ctx, poly_var_idx: usize, expr: &CallExpression) -> ParseError2 {
 
            let (poly_var, func_name) = get_poly_var_and_func_name(ctx, poly_var_idx, expr);
 
            return ParseError2::new_error(
 
                &ctx.module.source, expr.position(),
 
                &format!(
 
                    "Conflicting type for polymorphic variable '{}' of '{}'",
 
                    poly_var, func_name
 
                )
 
            )
 
        }
 

	
 
        // Actual checking
 
        let extra = self.extra_data.get(&call_id.upcast()).unwrap();
 
        let expr = &ctx.heap[call_id];
 

	
 
        // - check return type with itself
 
        if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&extra.returned, &extra.returned) {
 
            return construct_main_error(ctx, poly_idx, expr)
 
                .with_postfixed_info(
 
                    &ctx.module.source, expr.position(),
 
                    &format!(
 
                        "The return type inferred the conflicting types '{}' and '{}'",
 
                        InferenceType::partial_display_name(&ctx.heap, section_a),
 
                        InferenceType::partial_display_name(&ctx.heap, section_b)
 
                    )
 
                )
 
        }
 

	
 
        // - check arguments with each other argument and with return type
 
        for (arg_a_idx, arg_a) in extra.embedded.iter().enumerate() {
 
            for (arg_b_idx, arg_b) in extra.embedded.iter().enumerate() {
 
                if arg_b_idx > arg_a_idx {
 
                    break;
 
                }
 

	
 
                if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&arg_a, &arg_b) {
 
                    let error = construct_main_error(ctx, poly_idx, expr);
 
                    if arg_a_idx == arg_b_idx {
 
                        // Same argument
 
                        let arg = &ctx.heap[expr.arguments[arg_a_idx]];
 
                        return error.with_postfixed_info(
 
                            &ctx.module.source, arg.position(),
 
                            &format!(
 
                                "This argument inferred the conflicting types '{}' and '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a),
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        )
 
                    } else {
 
                        let arg_a = &ctx.heap[expr.arguments[arg_a_idx]];
 
                        let arg_b = &ctx.heap[expr.arguments[arg_b_idx]];
 
                        return error.with_postfixed_info(
 
                            &ctx.module.source, arg_a.position(),
 
                            &format!(
 
                                "This argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a)
 
                            )
 
                        ).with_postfixed_info(
 
                            &ctx.module.source, arg_b.position(),
 
                            &format!(
 
                                "While this argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        )
 
                    }
 
                }
 
            }
 

	
 
            // Check with return type
 
            if let Some((poly_idx, section_arg, section_ret)) = has_poly_mismatch(arg_a, &extra.returned) {
 
                let arg = &ctx.heap[expr.arguments[arg_a_idx]];
 
                return construct_main_error(ctx, poly_idx, expr)
 
                    .with_postfixed_info(
 
                        &ctx.module.source, arg.position(),
 
                        &format!(
 
                            "This argument inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, section_arg)
 
                        )
 
                    )
 
                    .with_postfixed_info(
 
                        &ctx.module.source, expr.position,
 
                        &format!(
 
                            "While the return type inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, section_ret)
 
                        )
 
                    )
 
            }
 
        }
 

	
 
        unreachable!("construct_poly_arg_error without actual error found?")
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 
    use crate::protocol::arena::Id;
 
    use InferenceTypePart as ITP;
 
    use InferenceType as IT;
 

	
 
    #[test]
 
    fn test_single_part_inference() {
 
        // lhs argument inferred from rhs
 
        let pairs = [
 
            (ITP::NumberLike, ITP::Byte),
 
            (ITP::IntegerLike, ITP::Int),
 
            (ITP::Unknown, ITP::Message),
 
            (ITP::Unknown, ITP::Long),
 
            (ITP::Unknown, ITP::String)
 
        ];
 
        for (lhs, rhs) in pairs.iter() {
 
            // Using infer-both
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let mut rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            // Using infer-single
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let mut rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = unsafe{ IT::infer_subtree_for_single_type(
 
            let rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0
 
            ) };
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_multi_part_inference() {
 
        let pairs = [
 
            (vec![ITP::ArrayLike, ITP::NumberLike], vec![ITP::Slice, ITP::Byte]),
 
            (vec![ITP::Unknown], vec![ITP::Input, ITP::Array, ITP::String]),
 
            (vec![ITP::PortLike, ITP::Int], vec![ITP::Input, ITP::Int]),
 
            (vec![ITP::Unknown], vec![ITP::Output, ITP::Int]),
 
            (
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Unknown, ITP::Output, ITP::Unknown],
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Array, ITP::Int, ITP::Output, ITP::Int]
 
            )
 
        ];
 

	
 
        for (lhs, rhs) in pairs.iter() {
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let mut rhs_type = IT::new(false, false, rhs.clone());
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let mut rhs_type = IT::new(false, false, rhs.clone());
 
            let result = unsafe{ IT::infer_subtree_for_single_type(
 
            let rhs_type = IT::new(false, false, rhs.clone());
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0
 
            ) };
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts)
 
        }
 
    }
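
    // Sketch of a negative case, assuming that fully specified but conflicting
    // parts are reported as incompatible by the single-type inference routine.
    #[test]
    fn test_incompatible_single_part_inference() {
        let mut lhs_type = IT::new(false, true, vec![ITP::Bool]);
        let rhs_type = IT::new(false, true, vec![ITP::Int]);
        let result = IT::infer_subtree_for_single_type(
            &mut lhs_type, 0, &rhs_type.parts, 0
        );
        assert_eq!(SingleInferenceResult::Incompatible, result);
    }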
 
}
 
\ No newline at end of file
src/protocol/parser/type_table.rs
 
@@ -23,412 +23,434 @@ process:
 
        non-polymorphic type and monomorphed instantiations of polymorphic types
 
        into concrete types.
 

	
 
So a base type is defined by its (optionally polymorphic) representation in the
 
AST. A concrete type has concrete types for each of the polymorphic arguments. A
 
struct, enum or union may have polymorphic arguments but not actually be a
 
polymorphic type. This happens when the polymorphic arguments are not used in
 
the type definition itself. Similarly for functions/components: but here we just
 
check the arguments/return type of the signature.
 

	
 
Apart from base types and concrete types, we also use the term "embedded type"
 
for types that are embedded within another type, such as the type of a
 
struct field or of a union variant. Embedded types may themselves have
 
polymorphic arguments and therefore form an embedded type tree.
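
As a rough example: a definition such as a struct Pair<A, B> with two fields of
types A and B is a single base type; instantiating it as Pair<int, bool> yields
one concrete (monomorphed) type; and the field types A and B are the embedded
types through which the polymorphic arguments are actually used.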
 

	
 
NOTE: for now a polymorphic definition of a function/component is illegal if the
 
    polymorphic arguments are not used in the arguments/return type. It should
 
    be legal, but we disallow it for now.
 

	
 
TODO: Allow potentially cyclic datatypes and reject truly cyclic datatypes.
 
TODO: Allow for the full potential of polymorphism
 
TODO: Detect "true" polymorphism: for datatypes like structs/enum/unions this
 
    is simple. For functions we need to check the entire body. Do it here? Or
 
    do it somewhere else?
 
TODO: Do we want to check fn argument collision here, or in validation phase?
 
TODO: Make type table an on-demand thing instead of constructing all base types.
 
TODO: Cleanup everything, feels like a lot can be written cleaner and with less
 
    assumptions on each function call.
 
// TODO: Review all comments
 
*/
 

	
 
use std::fmt::{Formatter, Result as FmtResult};
 
use std::collections::{HashMap, VecDeque};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::parser::symbol_table::{SymbolTable, Symbol};
 
use crate::protocol::inputsource::*;
 
use crate::protocol::parser::*;
 

	
 
//------------------------------------------------------------------------------
 
// Defined Types
 
//------------------------------------------------------------------------------
 

	
 
#[derive(Copy, Clone, PartialEq, Eq)]
 
pub enum TypeClass {
 
    Enum,
 
    Union,
 
    Struct,
 
    Function,
 
    Component
 
}
 

	
 
impl TypeClass {
 
    pub(crate) fn display_name(&self) -> &'static str {
 
        match self {
 
            TypeClass::Enum => "enum",
 
            TypeClass::Union => "enum",
 
            TypeClass::Struct => "struct",
 
            TypeClass::Function => "function",
 
            TypeClass::Component => "component",
 
        }
 
    }
 

	
 
    pub(crate) fn is_data_type(&self) -> bool {
 
        *self == TypeClass::Enum || *self == TypeClass::Union || *self == TypeClass::Struct
 
    }
 

	
 
    pub(crate) fn is_proc_type(&self) -> bool {
 
        *self == TypeClass::Function || *self == TypeClass::Component
 
    }
 
}
 

	
 
impl std::fmt::Display for TypeClass {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        write!(f, "{}", self.display_name())
 
    }
 
}
 

	
 
/// Struct wrapping around a potentially polymorphic type. If the type does not
 
/// have any polymorphic arguments then it will not have any monomorphs and
 
/// `is_polymorph` will be set to `false`. A type with polymorphic arguments
 
/// only has `is_polymorph` set to `true` if the polymorphic arguments actually
 
/// appear in the type's associated types (function return type, argument, struct
 
/// field, enum variant, etc.). Otherwise the polymorphic argument is just a
 
/// marker and does not influence the bytesize of the type.
 
pub struct DefinedType {
 
    pub(crate) ast_root: RootId,
 
    pub(crate) ast_definition: DefinitionId,
 
    pub(crate) definition: DefinedTypeVariant,
 
    pub(crate) poly_args: Vec<PolyArg>,
 
    pub(crate) is_polymorph: bool,
 
    pub(crate) is_pointerlike: bool,
 
    // TODO: @optimize
 
    pub(crate) monomorphs: Vec<Vec<ConcreteType>>,
 
}
 

	
 
impl DefinedType {
 
    fn add_monomorph(&mut self, types: Vec<ConcreteType>) {
 
        debug_assert!(!self.has_monomorph(&types), "monomorph already exists");
 
        self.monomorphs.push(types);
 
    }
 

	
 
    fn has_monomorph(&self, types: &Vec<ConcreteType>) -> bool {
 
        debug_assert_eq!(self.poly_args.len(), types.len(), "mismatch in number of polymorphic types");
 
        for monomorph in &self.monomorphs {
 
            if monomorph == types { return true; }
 
        }
 

	
 
        return false;
 
    }
 
}
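
// Usage sketch (assumption, not part of the changeset): callers are expected to
// check `has_monomorph` before registering a monomorph, since `add_monomorph`
// debug-asserts that the same set of concrete types is not inserted twice.
fn ensure_monomorph(defined: &mut DefinedType, types: Vec<ConcreteType>) {
    if !defined.has_monomorph(&types) {
        defined.add_monomorph(types);
    }
}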
 

	
 
pub enum DefinedTypeVariant {
 
    Enum(EnumType),
 
    Union(UnionType),
 
    Struct(StructType),
 
    Function(FunctionType),
 
    Component(ComponentType)
 
}
 

	
 
pub struct PolyArg {
 
    identifier: Identifier,
 
    /// Whether the polymorphic argument is used directly in the definition of
 
    /// the type (not including bodies of function/component types)
 
    is_in_use: bool,
 
}
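
// Sketch (assumption, not part of the changeset): given the `is_in_use` flag above,
// the `is_polymorph` flag of a `DefinedType` boils down to whether any polymorphic
// argument is actually used in the definition itself.
fn compute_is_polymorph(poly_args: &[PolyArg]) -> bool {
    poly_args.iter().any(|arg| arg.is_in_use)
}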
 

	
 
impl DefinedTypeVariant {
 
    pub(crate) fn type_class(&self) -> TypeClass {
 
        match self {
 
            DefinedTypeVariant::Enum(_) => TypeClass::Enum,
 
            DefinedTypeVariant::Union(_) => TypeClass::Union,
 
            DefinedTypeVariant::Struct(_) => TypeClass::Struct,
 
            DefinedTypeVariant::Function(_) => TypeClass::Function,
 
            DefinedTypeVariant::Component(_) => TypeClass::Component
 
        }
 
    }
 
}
 

	
 
/// `EnumType` is the classical C/C++ enum type. It has various variants with
 
/// an assigned integer value. The integer values may be user-defined,
 
/// compiler-defined, or a mix of the two. If a user assigns the same enum
 
/// value multiple times, we assume the user is an expert and we consider both
 
/// variants to be equal to one another.
 
pub struct EnumType {
 
    variants: Vec<EnumVariant>,
 
    representation: PrimitiveType,
 
}
 

	
 
// TODO: Also support maximum u64 value
 
pub struct EnumVariant {
 
    identifier: Identifier,
 
    value: i64,
 
}
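
// Illustration (assumption, not part of the changeset): per the comment on
// `EnumType`, two variants that were assigned the same integer value are treated
// as equal; only the underlying value matters for comparison.
fn enum_variants_equal(a: &EnumVariant, b: &EnumVariant) -> bool {
    a.value == b.value
}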
 

	
 
/// `UnionType` is the algebraic datatype (or sum type, or discriminated union).
 
/// A value is an element of the union, identified by its tag, and may contain
 
/// a single subtype.
 
pub struct UnionType {
 
    variants: Vec<UnionVariant>,
 
    tag_representation: PrimitiveType
 
}
 

	
 
pub struct UnionVariant {
 
    identifier: Identifier,
 
    parser_type: Option<ParserTypeId>,
 
    tag_value: i64,
 
}
 

	
 
pub struct StructType {
 
    fields: Vec<StructField>,
 
}
 

	
 
pub struct StructField {
 
    identifier: Identifier,
 
    parser_type: ParserTypeId,
 
}
 

	
 
pub struct FunctionType {
 
    pub return_type: ParserTypeId,
 
    pub arguments: Vec<FunctionArgument>
 
}
 

	
 
pub struct ComponentType {
    pub variant: ComponentVariant,
    pub arguments: Vec<FunctionArgument>
}
 

	
 
pub struct FunctionArgument {
 
    identifier: Identifier,
 
    parser_type: ParserTypeId,
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
// TODO: @cleanup Do I really need this, doesn't make the code that much cleaner
 
struct TypeIterator {
 
    breadcrumbs: Vec<(RootId, DefinitionId)>
 
}
 

	
 
impl TypeIterator {
 
    fn new() -> Self {
 
        Self{ breadcrumbs: Vec::with_capacity(32) }
 
    }
 

	
 
    fn reset(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.clear();
 
        self.breadcrumbs.push((root_id, definition_id))
 
    }
 

	
 
    fn push(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.push((root_id, definition_id));
 
    }
 

	
 
    fn contains(&self, root_id: RootId, definition_id: DefinitionId) -> bool {
 
        for (stored_root_id, stored_definition_id) in self.breadcrumbs.iter() {
 
            if *stored_root_id == root_id && *stored_definition_id == definition_id { return true; }
 
        }
 

	
 
        return false
 
    }
 

	
 
    fn top(&self) -> Option<(RootId, DefinitionId)> {
 
        self.breadcrumbs.last().map(|(r, d)| (*r, *d))
 
    }
 

	
 
    fn pop(&mut self) {
 
        debug_assert!(!self.breadcrumbs.is_empty());
 
        self.breadcrumbs.pop();
 
    }
 
}
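
// Sketch (assumption, not part of the changeset) of the breadcrumb pattern used by
// `resolve_base_definition` below: before descending into a subtype's definition we
// can check `contains`, e.g. to guard against cyclic type definitions. The helper
// name `try_descend` is hypothetical.
fn try_descend(iter: &mut TypeIterator, root_id: RootId, definition_id: DefinitionId) -> bool {
    if iter.contains(root_id, definition_id) {
        // Already on the breadcrumb stack: descending again would loop forever.
        return false;
    }
    iter.push(root_id, definition_id);
    true
}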
 

	
 
/// Result from attempting to resolve a `ParserType` using the symbol table and
 
/// the type table.
 
enum ResolveResult {
 
    /// ParserType is a builtin type
 
    BuiltIn,
 
    /// ParserType points to a polymorphic argument, contains the index of the
 
    /// polymorphic argument in the outermost definition (e.g. we may have 
 
    /// structs nested three levels deep, but in the innermost struct we can 
 
    /// only use the polyargs that are specified in the type definition of the
 
    /// outermost struct).
 
    PolyArg(usize),
 
    /// ParserType points to a user-defined type that is already resolved in the
 
    /// type table.
 
    Resolved((RootId, DefinitionId)),
 
    /// ParserType points to a user-defined type that is not yet resolved into
 
    /// the type table.
 
    Unresolved((RootId, DefinitionId))
 
}
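
// Sketch (assumption, not part of the changeset): the kind of dispatch a caller such
// as `ingest_resolve_result` performs on these variants; the real handling lives in
// the (truncated) remainder of this file.
fn describe_resolve_result(result: &ResolveResult) -> &'static str {
    match result {
        ResolveResult::BuiltIn => "builtin type, nothing to resolve",
        ResolveResult::PolyArg(_) => "polymorphic argument of the outermost definition",
        ResolveResult::Resolved(_) => "user-defined type already present in the type table",
        ResolveResult::Unresolved(_) => "user-defined type that must be resolved first",
    }
}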
 

	
 
pub(crate) struct TypeTable {
 
    /// Lookup from AST DefinitionId to a defined type. Considering possible
 
    /// polymorphs is done inside the `DefinedType` struct.
 
    lookup: HashMap<DefinitionId, DefinedType>,
 
    /// Iterator over `(module, definition)` tuples, used as a workspace to make
    /// sure that the base definitions of all of a type's subtypes are resolved.
 
    iter: TypeIterator,
 
    /// Queue of parser types that are being resolved into a
    /// `(module, definition)` tuple.
 
    parser_type_iter: VecDeque<ParserTypeId>,
 
}
 

	
 
pub(crate) struct TypeCtx<'a> {
 
    symbols: &'a SymbolTable,
 
    heap: &'a mut Heap,
 
    modules: &'a [LexedModule]
 
}
 

	
 
impl<'a> TypeCtx<'a> {
 
    pub(crate) fn new(symbols: &'a SymbolTable, heap: &'a mut Heap, modules: &'a [LexedModule]) -> Self {
 
        Self{ symbols, heap, modules }
 
    }
 
}
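
// Construction sketch (assumption, not part of the changeset): `symbols`, `heap` and
// `modules` come out of the earlier lexing/parsing phases; the type table is then
// built in one go from the wrapped context. The helper name `build_type_table` is
// hypothetical.
fn build_type_table<'a>(
    symbols: &'a SymbolTable,
    heap: &'a mut Heap,
    modules: &'a [LexedModule],
) -> Result<TypeTable, ParseError2> {
    let mut ctx = TypeCtx::new(symbols, heap, modules);
    TypeTable::new(&mut ctx)
}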
 

	
 
impl TypeTable {
 
    /// Construct a new type table. For now all base definitions are resolved
    /// eagerly during construction; monomorphs are registered on demand later.
 
    pub(crate) fn new(ctx: &mut TypeCtx) -> Result<Self, ParseError2> {
 
        // Make sure we're allowed to cast root_id to index into ctx.modules
 
        if cfg!(debug_assertions) {
 
            for (index, module) in ctx.modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        }
 

	
 
        // Use context to guess hashmap size
 
        let reserve_size = ctx.heap.definitions.len();
 
        let mut table = Self{
 
            lookup: HashMap::with_capacity(reserve_size),
 
            iter: TypeIterator::new(),
 
            parser_type_iter: VecDeque::with_capacity(64),
 
        };
 

	
 
        // TODO: @cleanup Rework this hack
 
        for root_idx in 0..ctx.modules.len() {
 
            let last_definition_idx = ctx.heap[ctx.modules[root_idx].root_id].definitions.len();
 
            for definition_idx in 0..last_definition_idx {
 
                let definition_id = ctx.heap[ctx.modules[root_idx].root_id].definitions[definition_idx];
 
                table.resolve_base_definition(ctx, definition_id)?;
 
            }
 
        }
 

	
 
        debug_assert_eq!(table.lookup.len(), reserve_size, "mismatch in reserved size of type table");
 

	
 
        Ok(table)
 
    }
 

	
 
    /// Retrieves base definition from type table. We must be able to retrieve
 
    /// it as we resolve all base types upon type table construction (for now).
 
    /// However, in the future we might do on-demand type resolving, so we
    /// return an `Option` anyway.
 
    pub(crate) fn get_base_definition(&self, definition_id: &DefinitionId) -> Option<&DefinedType> {
 
        self.lookup.get(&definition_id)
 
    }
 

	
 
    /// Instantiates a monomorph for a given base definition.
    pub(crate) fn add_monomorph(&mut self, definition_id: &DefinitionId, types: Vec<ConcreteType>) {
        debug_assert!(
            self.lookup.contains_key(definition_id),
            "attempting to instantiate monomorph of definition unknown to type table"
        );

        let definition = self.lookup.get_mut(definition_id).unwrap();
        definition.add_monomorph(types);
 
    }
 

	
 
    /// Checks if a given definition already has a specific monomorph
 
    pub(crate) fn has_monomorph(&mut self, definition_id: &DefinitionId, types: &Vec<ConcreteType>) -> bool {
 
        debug_assert!(
 
            self.lookup.contains_key(definition_id),
 
            "attempting to check monomorph existence of definition unknown to type table"
 
        );
 

	
 
        let definition = self.lookup.get(definition_id).unwrap();
        definition.has_monomorph(types)
 
    }
 

	
 
    /// This function will resolve just the basic definition of the type, it
 
    /// will not handle any of the monomorphized instances of the type.
 
    fn resolve_base_definition<'a>(&'a mut self, ctx: &mut TypeCtx, definition_id: DefinitionId) -> Result<(), ParseError2> {
 
        // Check if we have already resolved the base definition
 
        if self.lookup.contains_key(&definition_id) { return Ok(()); }
 

	
 
        let root_id = Self::find_root_id(ctx, definition_id);
 
        self.iter.reset(root_id, definition_id);
 

	
 
        while let Some((root_id, definition_id)) = self.iter.top() {
 
            // We have a type to resolve
 
            let definition = &ctx.heap[definition_id];
 

	
 
            let can_pop_breadcrumb = match definition {
 
                // TODO: @cleanup Borrow rules hax
 
                Definition::Enum(_) => self.resolve_base_enum_definition(ctx, root_id, definition_id),
 
                Definition::Struct(_) => self.resolve_base_struct_definition(ctx, root_id, definition_id),
 
                Definition::Component(_) => self.resolve_base_component_definition(ctx, root_id, definition_id),
 
                Definition::Function(_) => self.resolve_base_function_definition(ctx, root_id, definition_id),
 
            }?;
 

	
 
            // If the definition was resolved we can pop the breadcrumb.
            // Otherwise `ingest_resolve_result` has pushed a new breadcrumb
            // that we must follow before we can resolve the current type.
 
            if can_pop_breadcrumb {
 
                self.iter.pop();
 
            }
 
        }
 

	
 
        // We must have resolved the type
 
        debug_assert!(self.lookup.contains_key(&definition_id), "base type not resolved");
 
        Ok(())
 
    }
 

	
 
    /// Resolve the basic enum definition to an entry in the type table. It will
 
    /// not instantiate any monomorphized instances of polymorphic enum
 
    /// definitions. If a subtype has to be resolved first then this function
 
    /// will return `false` after calling `ingest_resolve_result`.
 
    fn resolve_base_enum_definition(&mut self, ctx: &mut TypeCtx, root_id: RootId, definition_id: DefinitionId) -> Result<bool, ParseError2> {
 
        debug_assert!(ctx.heap[definition_id].is_enum());
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base enum already resolved");
 
        
 
        let definition = ctx.heap[definition_id].as_enum();
 

	
 
        // Check if the enum should be implemented as a classic enumeration or
        // a tagged union. Keep track of the position of the first integer-valued
        // and the first type-embedding variant for error messages. Make sure all
        // embedded types are resolved.
 
        let mut first_tag_value = None;
 
        let mut first_int_value = None;
 
        for variant in &definition.variants {
 
            match &variant.value {
 
                EnumVariantValue::None => {},
 
                EnumVariantValue::Integer(_) => if first_int_value.is_none() {
 
                    first_int_value = Some(variant.position);
 
                },
 
                EnumVariantValue::Type(variant_type_id) => {
 
                    if first_tag_value.is_none() {
 
                        first_tag_value = Some(variant.position);
 
                    }
 

	
 
                    // Check if the embedded type needs to be resolved
 
                    let resolve_result = self.resolve_base_parser_type(ctx, &definition.poly_vars, root_id, *variant_type_id)?;
 
                    if !self.ingest_resolve_result(ctx, resolve_result)? {
 
                        return Ok(false)
 
                    }
 
                }
 
            }
 
        }
 

	
 
        if first_tag_value.is_some() && first_int_value.is_some() {
 
            // Conceptually this could be allowed, but it is almost certainly a
            // programmer mistake, so reject it with a descriptive error.
 
            let module_source = &ctx.modules[root_id.index as usize].source;
 
            let tag_pos = first_tag_value.unwrap();
 
            let int_pos = first_int_value.unwrap();
 
            return Err(
 
                ParseError2::new_error(
 
                    module_source, definition.position,
 
                    "Illegal combination of enum integer variant(s) and enum union variant(s)"
 
                )
 
                    .with_postfixed_info(module_source, int_pos, "Assigning an integer value here")
 
                    .with_postfixed_info(module_source, tag_pos, "Embedding a type in a union variant here")
 
            );
 
        }
 

	
 
        // Enumeration is legal
 
        if first_tag_value.is_some() {
 
            // Implement as a tagged union
 

	
 
            // Determine the union variants
 
            let mut tag_value = -1;
 
            let mut variants = Vec::with_capacity(definition.variants.len());
 
            for variant in &definition.variants {
 
                tag_value += 1;
 
                let parser_type = match &variant.value {
 
                    EnumVariantValue::None => {

Changeset was too big and was cut off...