Changeset - d8148185e205
[Not reviewed]
MH <contact@maxhenger.nl> - 2021-03-24 15:17:42 (4 years ago)
finished initial type inferencer/checker, tests work again
9 files changed with 484 insertions and 662 deletions:
0 comments (0 inline, 0 general)
src/protocol/ast.rs
 
@@ -515,488 +515,503 @@ impl SyntaxElement for Root {
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum Pragma {
 
    Version(PragmaVersion),
 
    Module(PragmaModule)
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct PragmaVersion {
 
    pub this: PragmaId,
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub version: u64,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct PragmaModule {
 
    pub this: PragmaId,
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub value: Vec<u8>,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct PragmaOld {
 
    pub this: PragmaId,
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub value: Vec<u8>,
 
}
 

	
 
impl SyntaxElement for PragmaOld {
 
    fn position(&self) -> InputPosition {
 
        self.position
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum Import {
 
    Module(ImportModule),
 
    Symbols(ImportSymbols)
 
}
 

	
 
impl Import {
 
    pub(crate) fn as_module(&self) -> &ImportModule {
 
        match self {
 
            Import::Module(m) => m,
 
            _ => panic!("Unable to cast 'Import' to 'ImportModule'")
 
        }
 
    }
 
    pub(crate) fn as_symbols(&self) -> &ImportSymbols {
 
        match self {
 
            Import::Symbols(m) => m,
 
            _ => panic!("Unable to cast 'Import' to 'ImportSymbols'")
 
        }
 
    }
 
}
 

	
 
impl SyntaxElement for Import {
 
    fn position(&self) -> InputPosition {
 
        match self {
 
            Import::Module(m) => m.position,
 
            Import::Symbols(m) => m.position
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ImportModule {
 
    pub this: ImportId,
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub module_name: Vec<u8>,
 
    pub alias: Vec<u8>,
 
    // Phase 2: module resolving
 
    pub module_id: Option<RootId>,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct AliasedSymbol {
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub name: Vec<u8>,
 
    pub alias: Vec<u8>,
 
    // Phase 2: symbol resolving
 
    pub definition_id: Option<DefinitionId>,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ImportSymbols {
 
    pub this: ImportId,
 
    // Phase 1: parser
 
    pub position: InputPosition,
 
    pub module_name: Vec<u8>,
 
    // Phase 2: module resolving
 
    pub module_id: Option<RootId>,
 
    // Phase 1&2
 
    // if symbols is empty, then we implicitly import all symbols without any
 
    // aliases for them. If it is not empty, then symbols are explicitly
 
    // specified, and optionally given an alias.
 
    pub symbols: Vec<AliasedSymbol>,
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct Identifier {
 
    pub position: InputPosition,
 
    pub value: Vec<u8>
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct NamespacedIdentifier {
 
    pub position: InputPosition,
 
    pub num_namespaces: u8,
 
    pub value: Vec<u8>,
 
}
 

	
 
impl NamespacedIdentifier {
 
    pub(crate) fn iter(&self) -> NamespacedIdentifierIter {
 
        NamespacedIdentifierIter{
 
            value: &self.value,
 
            cur_offset: 0,
 
            num_returned: 0,
 
            num_total: self.num_namespaces
 
        }
 
    }
 
}
 

	
 
impl PartialEq for NamespacedIdentifier {
 
    fn eq(&self, other: &Self) -> bool {
 
        return self.value == other.value
 
    }
 
}
 
impl Eq for NamespacedIdentifier{}
 

	
 
// TODO: Just keep ref to NamespacedIdentifier
 
pub(crate) struct NamespacedIdentifierIter<'a> {
 
    value: &'a Vec<u8>,
 
    cur_offset: usize,
 
    num_returned: u8,
 
    num_total: u8,
 
}
 

	
 
impl<'a> NamespacedIdentifierIter<'a> {
 
    pub(crate) fn num_returned(&self) -> u8 {
 
        return self.num_returned;
 
    }
 
    pub(crate) fn num_remaining(&self) -> u8 {
 
        return self.num_total - self.num_returned
 
    }
 
    pub(crate) fn returned_section(&self) -> &[u8] {
 
        // Offset always includes the two trailing ':' characters
 
        let end = if self.cur_offset >= 2 { self.cur_offset - 2 } else { self.cur_offset };
 
        return &self.value[..end]
 
    }
 
}
 

	
 
impl<'a> Iterator for NamespacedIdentifierIter<'a> {
 
    type Item = &'a [u8];
 
    fn next(&mut self) -> Option<Self::Item> {
 
        if self.cur_offset >= self.value.len() {
 
            debug_assert_eq!(self.num_returned, self.num_total);
 
            None
 
        } else {
 
            debug_assert!(self.num_returned < self.num_total);
 
            let start = self.cur_offset;
 
            let mut end = start;
 
            while end < self.value.len() - 1 {
 
                if self.value[end] == b':' && self.value[end + 1] == b':' {
 
                    self.cur_offset = end + 2;
 
                    self.num_returned += 1;
 
                    return Some(&self.value[start..end]);
 
                }
 
                end += 1;
 
            }
 

	
 
            // If NamespacedIdentifier is constructed properly, then we cannot
 
            // end with "::" in the value, so
 
            debug_assert!(end == 0 || (self.value[end - 1] != b':' && self.value[end] != b':'));
 
            debug_assert_eq!(self.num_returned + 1, self.num_total);
 
            self.cur_offset = self.value.len();
 
            self.num_returned += 1;
 
            return Some(&self.value[start..]);
 
        }
 
    }
 
}
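The iterator scans the flattened identifier bytes and yields each "::"-separated section in turn; once no separator remains, the rest of the slice is returned as the final section. A minimal standalone sketch of the same splitting behaviour on a plain byte slice (not the crate's NamespacedIdentifier, and assuming a non-empty, well-formed input):

// Standalone illustration of the splitting behaviour used by
// NamespacedIdentifierIter: yields the sections of b"std::collections::vec".
fn split_namespaced(value: &[u8]) -> Vec<&[u8]> {
    let mut sections = Vec::new();
    let mut start = 0;
    let mut idx = 0;
    while idx + 1 < value.len() {
        if value[idx] == b':' && value[idx + 1] == b':' {
            sections.push(&value[start..idx]);
            idx += 2;
            start = idx;
        } else {
            idx += 1;
        }
    }
    sections.push(&value[start..]); // final section has no trailing "::"
    sections
}

fn main() {
    let parts = split_namespaced(b"std::collections::vec");
    assert_eq!(parts, vec![&b"std"[..], &b"collections"[..], &b"vec"[..]]);
}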
 

	
 
impl Display for Identifier {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        // A source identifier is in ASCII range.
 
        write!(f, "{}", String::from_utf8_lossy(&self.value))
 
    }
 
}
 

	
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum ParserTypeVariant {
 
    // Basic builtin
 
    Message,
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // Literals (need to get concrete builtin type during typechecking)
 
    IntegerLiteral,
 
    Inferred,
 
    // Complex builtins
 
    Array(ParserTypeId), // array of a type
 
    Input(ParserTypeId), // typed input endpoint of a channel
 
    Output(ParserTypeId), // typed output endpoint of a channel
 
    Symbolic(SymbolicParserType), // symbolic type (definition or polyarg)
 
}
 

	
 
impl ParserTypeVariant {
 
    pub(crate) fn supports_polymorphic_args(&self) -> bool {
 
        use ParserTypeVariant::*;
 
        match self {
 
            Message | Bool | Byte | Short | Int | Long | String | IntegerLiteral | Inferred => false,
 
            _ => true
 
        }
 
    }
 
}
 

	
 
/// ParserType is a specification of a type during the parsing phase and initial
 
/// linker/validator phase of the compilation process. These types may be
 
/// (partially) inferred or represent literals (e.g. an integer whose byte size is
 
/// not yet determined).
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ParserType {
 
    pub this: ParserTypeId,
 
    pub pos: InputPosition,
 
    pub variant: ParserTypeVariant,
 
}
 

	
 
/// SymbolicParserType is the specification of a symbolic type. During the
 
/// parsing phase we will only store the identifier of the type. During the
 
/// validation phase we will determine whether it refers to a user-defined type,
 
/// or a polymorphic argument. After the validation phase it may still be the
 
/// case that the resulting `variant` will not pass the typechecker.
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct SymbolicParserType {
 
    // Phase 1: parser
 
    pub identifier: NamespacedIdentifier,
 
    /// The user-specified polymorphic arguments. Zero-length implies that the
 
    /// user did not specify any of them, and they're either not needed or all
 
    /// need to be inferred. Otherwise the number of polymorphic arguments must
 
    /// match those of the corresponding definition
 
    pub poly_args: Vec<ParserTypeId>,
 
    // Phase 2: validation/linking (for types in function/component bodies) and
 
    //  type table construction (for embedded types of structs/unions)
 
    pub variant: Option<SymbolicParserTypeVariant>
 
}
 

	
 
/// Specifies whether the symbolic type points to an actual user-defined type,
 
/// or whether it points to a polymorphic argument within the definition (e.g.
 
/// a defined variable `T var` within a function `int func<T>()`).
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum SymbolicParserTypeVariant {
 
    Definition(DefinitionId),
 
    // TODO: figure out if I need the DefinitionId here
 
    PolyArg(DefinitionId, usize), // index of polyarg in the definition
 
}
 

	
 
/// ConcreteType is the representation of a type after resolving symbolic types
 
/// and performing type inference
 
-#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
 
pub enum ConcreteTypePart {
 
    // Markers for the use of polymorphic types within a procedure's body that
 
    // refer to polymorphic variables on the procedure's definition. Different
 
    // from markers in the `InferenceType`, these will not contain nested types.
 
    Marker(usize),
 
    // Special types (cannot be explicitly constructed by the programmer)
 
    Void,
 
    // Builtin types without nested types
 
    Message,
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // Builtin types with one nested type
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // User defined type with any number of nested types
 
    Instance(DefinitionId, usize),
 
}
 

	
 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
 
pub struct ConcreteType {
 
    pub(crate) parts: Vec<ConcreteTypePart>
 
}
 

	
 
impl Default for ConcreteType {
 
    fn default() -> Self {
 
        Self{ parts: Vec::new() }
 
    }
 
}
 

	
 
impl ConcreteType {
 
    pub(crate) fn has_marker(&self) -> bool {
 
        self.parts
 
            .iter()
 
            .any(|p| {
 
                if let ConcreteTypePart::Marker(_) = p { true } else { false }
 
            })
 
    }
 
}
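The parts vector stores the type tree in pre-order: each part that takes nested types is immediately followed by the encodings of its children, which is exactly what the recursive printer in ast_printer.rs walks later in this changeset. A minimal standalone sketch of that layout, using a simplified stand-in enum rather than ConcreteTypePart itself:

// Simplified stand-in for ConcreteTypePart to show the pre-order layout.
enum Part { Int, Array, Input }

// `in<int[]>` flattens to [Input, Array, Int]: each constructor is
// directly followed by the encodings of its nested types.
fn print_type(parts: &[Part], idx: usize) -> (String, usize) {
    match parts[idx] {
        Part::Int => ("int".to_string(), idx + 1),
        Part::Array => {
            let (inner, next) = print_type(parts, idx + 1);
            (format!("{}[]", inner), next)
        }
        Part::Input => {
            let (inner, next) = print_type(parts, idx + 1);
            (format!("in<{}>", inner), next)
        }
    }
}

fn main() {
    let parts = [Part::Input, Part::Array, Part::Int];
    let (text, _) = print_type(&parts, 0);
    assert_eq!(text, "in<int[]>");
}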
 

	
 
// TODO: Remove at some point
 
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 
pub enum PrimitiveType {
 
    Unassigned,
 
    Input,
 
    Output,
 
    Message,
 
    Boolean,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    Symbolic(PrimitiveSymbolic)
 
}
 

	
 
// TODO: @cleanup, remove PartialEq implementations
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct PrimitiveSymbolic {
 
    // Phase 1: parser
 
    pub(crate) identifier: NamespacedIdentifier,
 
    // Phase 2: typing
 
    pub(crate) definition: Option<DefinitionId>
 
}
 

	
 
impl PartialEq for PrimitiveSymbolic {
 
    fn eq(&self, other: &Self) -> bool {
 
        self.identifier == other.identifier
 
    }
 
}
 
impl Eq for PrimitiveSymbolic{}
 

	
 
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 
pub struct Type {
 
    pub primitive: PrimitiveType,
 
    pub array: bool,
 
}
 

	
 
#[allow(dead_code)]
 
impl Type {
 
    pub const UNASSIGNED: Type = Type { primitive: PrimitiveType::Unassigned, array: false };
 

	
 
    pub const INPUT: Type = Type { primitive: PrimitiveType::Input, array: false };
 
    pub const OUTPUT: Type = Type { primitive: PrimitiveType::Output, array: false };
 
    pub const MESSAGE: Type = Type { primitive: PrimitiveType::Message, array: false };
 
    pub const BOOLEAN: Type = Type { primitive: PrimitiveType::Boolean, array: false };
 
    pub const BYTE: Type = Type { primitive: PrimitiveType::Byte, array: false };
 
    pub const SHORT: Type = Type { primitive: PrimitiveType::Short, array: false };
 
    pub const INT: Type = Type { primitive: PrimitiveType::Int, array: false };
 
    pub const LONG: Type = Type { primitive: PrimitiveType::Long, array: false };
 

	
 
    pub const INPUT_ARRAY: Type = Type { primitive: PrimitiveType::Input, array: true };
 
    pub const OUTPUT_ARRAY: Type = Type { primitive: PrimitiveType::Output, array: true };
 
    pub const MESSAGE_ARRAY: Type = Type { primitive: PrimitiveType::Message, array: true };
 
    pub const BOOLEAN_ARRAY: Type = Type { primitive: PrimitiveType::Boolean, array: true };
 
    pub const BYTE_ARRAY: Type = Type { primitive: PrimitiveType::Byte, array: true };
 
    pub const SHORT_ARRAY: Type = Type { primitive: PrimitiveType::Short, array: true };
 
    pub const INT_ARRAY: Type = Type { primitive: PrimitiveType::Int, array: true };
 
    pub const LONG_ARRAY: Type = Type { primitive: PrimitiveType::Long, array: true };
 
}
 

	
 
impl Display for Type {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        match &self.primitive {
 
            PrimitiveType::Unassigned => {
 
                write!(f, "unassigned")?;
 
            }
 
            PrimitiveType::Input => {
 
                write!(f, "in")?;
 
            }
 
            PrimitiveType::Output => {
 
                write!(f, "out")?;
 
            }
 
            PrimitiveType::Message => {
 
                write!(f, "msg")?;
 
            }
 
            PrimitiveType::Boolean => {
 
                write!(f, "boolean")?;
 
            }
 
            PrimitiveType::Byte => {
 
                write!(f, "byte")?;
 
            }
 
            PrimitiveType::Short => {
 
                write!(f, "short")?;
 
            }
 
            PrimitiveType::Int => {
 
                write!(f, "int")?;
 
            }
 
            PrimitiveType::Long => {
 
                write!(f, "long")?;
 
            }
 
            PrimitiveType::Symbolic(data) => {
 
                // Type data is in ASCII range.
 
                if let Some(id) = &data.definition {
 
                    write!(
 
                        f, "Symbolic({}, id: {})", 
 
                        String::from_utf8_lossy(&data.identifier.value),
 
                        id.index
 
                    )?;
 
                } else {
 
                    write!(
 
                        f, "Symbolic({}, id: Unresolved)",
 
                        String::from_utf8_lossy(&data.identifier.value)
 
                    )?;
 
                }
 
            }
 
        }
 
        if self.array {
 
            write!(f, "[]")
 
        } else {
 
            Ok(())
 
        }
 
    }
 
}
 

	
 
type CharacterData = Vec<u8>;
 
type IntegerData = i64;
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum Constant {
 
    Null, // message
 
    True,
 
    False,
 
    Character(CharacterData),
 
    Integer(IntegerData),
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum Method {
 
    Get,
 
    Put,
 
    Fires,
 
    Create,
 
    Symbolic(MethodSymbolic)
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct MethodSymbolic {
 
    pub(crate) identifier: NamespacedIdentifier,
 
    pub(crate) definition: Option<DefinitionId>
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub enum Field {
 
    Length,
 
    Symbolic(Identifier),
 
}
 
impl Field {
 
    pub fn is_length(&self) -> bool {
 
        match self {
 
            Field::Length => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
 
pub enum Scope {
 
    Definition(DefinitionId),
 
    Regular(BlockStatementId),
 
    Synchronous((SynchronousStatementId, BlockStatementId)),
 
}
 

	
 
impl Scope {
 
    pub fn is_block(&self) -> bool {
 
        match &self {
 
            Scope::Definition(_) => false,
 
            Scope::Regular(_) => true,
 
            Scope::Synchronous(_) => true,
 
        }
 
    }
 
    pub fn to_block(&self) -> BlockStatementId {
 
        match &self {
 
            Scope::Regular(id) => *id,
 
            Scope::Synchronous((_, id)) => *id,
 
            _ => panic!("unable to get BlockStatement from Scope")
 
        }
 
    }
 
}
 

	
 
pub trait VariableScope {
 
    fn parent_scope(&self, h: &Heap) -> Option<Scope>;
 
    fn get_variable(&self, h: &Heap, id: &Identifier) -> Option<VariableId>;
 
}
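Name resolution is expected to walk this chain: ask the current scope for the variable and, on a miss, move to the parent until the definition scope has no parent left. A minimal sketch of that lookup loop, assuming the crate's Heap, Scope, Identifier and VariableId types (the helper name resolve_variable is hypothetical, not part of this changeset):

// Hypothetical helper: resolve `id` by walking the scope chain upwards.
fn resolve_variable(h: &Heap, mut scope: Scope, id: &Identifier) -> Option<VariableId> {
    loop {
        if let Some(var_id) = scope.get_variable(h, id) {
            return Some(var_id);
        }
        scope = scope.parent_scope(h)?; // None once the definition scope is reached
    }
}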
 

	
 
impl VariableScope for Scope {
 
    fn parent_scope(&self, h: &Heap) -> Option<Scope> {
 
        match self {
 
            Scope::Definition(def) => h[*def].parent_scope(h),
 
            Scope::Regular(stmt) => h[*stmt].parent_scope(h),
 
            Scope::Synchronous((stmt, _)) => h[*stmt].parent_scope(h),
 
        }
 
    }
src/protocol/ast_printer.rs
 
use std::fmt::{Debug, Display, Write};
 
use std::io::Write as IOWrite;
 

	
 
use super::ast::*;
 

	
 
const INDENT: usize = 2;
 

	
 
const PREFIX_EMPTY: &'static str = "    ";
 
const PREFIX_ROOT_ID: &'static str = "Root";
 
const PREFIX_PRAGMA_ID: &'static str = "Prag";
 
const PREFIX_IMPORT_ID: &'static str = "Imp ";
 
const PREFIX_TYPE_ANNOT_ID: &'static str = "TyAn";
 
const PREFIX_VARIABLE_ID: &'static str = "Var ";
 
const PREFIX_PARAMETER_ID: &'static str = "Par ";
 
const PREFIX_LOCAL_ID: &'static str = "Loc ";
 
const PREFIX_DEFINITION_ID: &'static str = "Def ";
 
const PREFIX_STRUCT_ID: &'static str = "DefS";
 
const PREFIX_ENUM_ID: &'static str = "DefE";
 
const PREFIX_COMPONENT_ID: &'static str = "DefC";
 
const PREFIX_FUNCTION_ID: &'static str = "DefF";
 
const PREFIX_STMT_ID: &'static str = "Stmt";
 
const PREFIX_BLOCK_STMT_ID: &'static str = "SBl ";
 
const PREFIX_LOCAL_STMT_ID: &'static str = "SLoc";
 
const PREFIX_MEM_STMT_ID: &'static str = "SMem";
 
const PREFIX_CHANNEL_STMT_ID: &'static str = "SCha";
 
const PREFIX_SKIP_STMT_ID: &'static str = "SSki";
 
const PREFIX_LABELED_STMT_ID: &'static str = "SLab";
 
const PREFIX_IF_STMT_ID: &'static str = "SIf ";
 
const PREFIX_ENDIF_STMT_ID: &'static str = "SEIf";
 
const PREFIX_WHILE_STMT_ID: &'static str = "SWhi";
 
const PREFIX_ENDWHILE_STMT_ID: &'static str = "SEWh";
 
const PREFIX_BREAK_STMT_ID: &'static str = "SBre";
 
const PREFIX_CONTINUE_STMT_ID: &'static str = "SCon";
 
const PREFIX_SYNC_STMT_ID: &'static str = "SSyn";
 
const PREFIX_ENDSYNC_STMT_ID: &'static str = "SESy";
 
const PREFIX_RETURN_STMT_ID: &'static str = "SRet";
 
const PREFIX_ASSERT_STMT_ID: &'static str = "SAsr";
 
const PREFIX_GOTO_STMT_ID: &'static str = "SGot";
 
const PREFIX_NEW_STMT_ID: &'static str = "SNew";
 
const PREFIX_PUT_STMT_ID: &'static str = "SPut";
 
const PREFIX_EXPR_STMT_ID: &'static str = "SExp";
 
const PREFIX_ASSIGNMENT_EXPR_ID: &'static str = "EAsi";
 
const PREFIX_CONDITIONAL_EXPR_ID: &'static str = "ECnd";
 
const PREFIX_BINARY_EXPR_ID: &'static str = "EBin";
 
const PREFIX_UNARY_EXPR_ID: &'static str = "EUna";
 
const PREFIX_INDEXING_EXPR_ID: &'static str = "EIdx";
 
const PREFIX_SLICING_EXPR_ID: &'static str = "ESli";
 
const PREFIX_SELECT_EXPR_ID: &'static str = "ESel";
 
const PREFIX_ARRAY_EXPR_ID: &'static str = "EArr";
 
const PREFIX_CONST_EXPR_ID: &'static str = "ECns";
 
const PREFIX_CALL_EXPR_ID: &'static str = "ECll";
 
const PREFIX_VARIABLE_EXPR_ID: &'static str = "EVar";
 

	
 
struct KV<'a> {
 
    buffer: &'a mut String,
 
    prefix: Option<(&'static str, u32)>,
 
    indent: usize,
 
    temp_key: &'a mut String,
 
    temp_val: &'a mut String,
 
}
 

	
 
impl<'a> KV<'a> {
 
    fn new(buffer: &'a mut String, temp_key: &'a mut String, temp_val: &'a mut String, indent: usize) -> Self {
 
        temp_key.clear();
 
        temp_val.clear();
 
        KV{
 
            buffer,
 
            prefix: None,
 
            indent,
 
            temp_key,
 
            temp_val
 
        }
 
    }
 

	
 
    fn with_id(mut self, prefix: &'static str, id: u32) -> Self {
 
        self.prefix = Some((prefix, id));
 
        self
 
    }
 

	
 
    fn with_s_key(self, key: &str) -> Self {
 
        self.temp_key.push_str(key);
 
        self
 
    }
 

	
 
-    fn with_d_key<D: Display>(mut self, key: &D) -> Self {
+    fn with_d_key<D: Display>(self, key: &D) -> Self {
 
        self.temp_key.push_str(&key.to_string());
 
        self
 
    }
 

	
 
    fn with_s_val(self, val: &str) -> Self {
 
        self.temp_val.push_str(val);
 
        self
 
    }
 

	
 
-    fn with_disp_val<D: Display>(mut self, val: &D) -> Self {
+    fn with_disp_val<D: Display>(self, val: &D) -> Self {
 
        self.temp_val.push_str(&format!("{}", val));
 
        self
 
    }
 

	
 
-    fn with_debug_val<D: Debug>(mut self, val: &D) -> Self {
+    fn with_debug_val<D: Debug>(self, val: &D) -> Self {
 
        self.temp_val.push_str(&format!("{:?}", val));
 
        self
 
    }
 

	
 
    fn with_ascii_val(self, val: &[u8]) -> Self {
 
        self.temp_val.push_str(&*String::from_utf8_lossy(val));
 
        self
 
    }
 

	
 
-    fn with_opt_disp_val<D: Display>(mut self, val: Option<&D>) -> Self {
+    fn with_opt_disp_val<D: Display>(self, val: Option<&D>) -> Self {
 
        match val {
 
            Some(v) => { self.temp_val.push_str(&format!("Some({})", v)); },
 
            None => { self.temp_val.push_str("None"); }
 
        }
 
        self
 
    }
 

	
 
    fn with_opt_ascii_val(self, val: Option<&[u8]>) -> Self {
 
        match val {
 
            Some(v) => {
 
                self.temp_val.push_str("Some(");
 
                self.temp_val.push_str(&*String::from_utf8_lossy(v));
 
                self.temp_val.push(')');
 
            },
 
            None => {
 
                self.temp_val.push_str("None");
 
            }
 
        }
 
        self
 
    }
 

	
 
    fn with_custom_val<F: Fn(&mut String)>(mut self, val_fn: F) -> Self {
 
        val_fn(&mut self.temp_val);
 
        self
 
    }
 
}
 

	
 
impl<'a> Drop for KV<'a> {
 
    fn drop(&mut self) {
 
        // Prefix and indent
 
        if let Some((prefix, id)) = &self.prefix {
 
            self.buffer.push_str(&format!("{}[{:04}]", prefix, id));
 
        } else {
 
            self.buffer.push_str("           ");
 
        }
 

	
 
        for _ in 0..self.indent * INDENT {
 
            self.buffer.push(' ');
 
        }
 

	
 
        // Leading dash
 
        self.buffer.push_str("- ");
 

	
 
        // Key and value
 
        self.buffer.push_str(self.temp_key);
 
        if self.temp_val.is_empty() {
 
            self.buffer.push(':');
 
        } else {
 
            self.buffer.push_str(": ");
 
            self.buffer.push_str(&self.temp_val);
 
        }
 
        self.buffer.push('\n');
 
    }
 
}
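A KV line is therefore assembled through the chained with_* setters and only written to the output buffer when the value is dropped at the end of the statement. A minimal standalone sketch of this write-on-drop builder pattern (the Line type below is illustrative, not the crate's KV):

// Standalone illustration of the write-on-drop builder used by KV:
// setters fill the fields, Drop formats and appends the finished line.
struct Line<'a> {
    buffer: &'a mut String,
    key: String,
    val: String,
}

impl<'a> Line<'a> {
    fn new(buffer: &'a mut String) -> Self {
        Line { buffer, key: String::new(), val: String::new() }
    }
    fn key(mut self, key: &str) -> Self { self.key.push_str(key); self }
    fn val(mut self, val: &str) -> Self { self.val.push_str(val); self }
}

impl<'a> Drop for Line<'a> {
    fn drop(&mut self) {
        self.buffer.push_str(&format!("- {}: {}\n", self.key, self.val));
    }
}

fn main() {
    let mut out = String::new();
    Line::new(&mut out).key("Name").val("example"); // flushed here, on drop
    assert_eq!(out, "- Name: example\n");
}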
 

	
 
pub(crate) struct ASTWriter {
 
    cur_definition: Option<DefinitionId>,
 
    buffer: String,
 
    temp1: String,
 
    temp2: String,
 
}
 

	
 
impl ASTWriter {
 
    pub(crate) fn new() -> Self {
 
        Self{
 
            cur_definition: None,
 
            buffer: String::with_capacity(4096),
 
            temp1: String::with_capacity(256),
 
            temp2: String::with_capacity(256),
 
        }
 
    }
 
    pub(crate) fn write_ast<W: IOWrite>(&mut self, w: &mut W, heap: &Heap) {
 
        for root_id in heap.protocol_descriptions.iter().map(|v| v.this) {
 
            self.write_module(heap, root_id);
 
            w.write_all(self.buffer.as_bytes()).expect("flush buffer");
 
            self.buffer.clear();
 
        }
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Top-level module writing
 
    //--------------------------------------------------------------------------
 

	
 
    fn write_module(&mut self, heap: &Heap, root_id: RootId) {
 
        self.kv(0).with_id(PREFIX_ROOT_ID, root_id.index)
 
            .with_s_key("Module");
 

	
 
        let root = &heap[root_id];
 
        self.kv(1).with_s_key("Pragmas");
 
        for pragma_id in &root.pragmas {
 
            self.write_pragma(heap, *pragma_id, 2);
 
        }
 

	
 
        self.kv(1).with_s_key("Imports");
 
        for import_id in &root.imports {
 
            self.write_import(heap, *import_id, 2);
 
        }
 

	
 
        self.kv(1).with_s_key("Definitions");
 
        for def_id in &root.definitions {
 
            self.write_definition(heap, *def_id, 2);
 
        }
 
    }
 

	
 
    fn write_pragma(&mut self, heap: &Heap, pragma_id: PragmaId, indent: usize) {
 
        match &heap[pragma_id] {
 
            Pragma::Version(pragma) => {
 
                self.kv(indent).with_id(PREFIX_PRAGMA_ID, pragma.this.index)
 
                    .with_s_key("PragmaVersion")
 
                    .with_disp_val(&pragma.version);
 
            },
 
            Pragma::Module(pragma) => {
 
                self.kv(indent).with_id(PREFIX_PRAGMA_ID, pragma.this.index)
 
                    .with_s_key("PragmaModule")
 
                    .with_ascii_val(&pragma.value);
 
            }
 
        }
 
    }
 

	
 
    fn write_import(&mut self, heap: &Heap, import_id: ImportId, indent: usize) {
 
        let import = &heap[import_id];
 
        let indent2 = indent + 1;
 

	
 
        match import {
 
            Import::Module(import) => {
 
                self.kv(indent).with_id(PREFIX_IMPORT_ID, import.this.index)
 
                    .with_s_key("ImportModule");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&import.module_name);
 
                self.kv(indent2).with_s_key("Alias").with_ascii_val(&import.alias);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(import.module_id.as_ref().map(|v| &v.index));
 
            },
 
            Import::Symbols(import) => {
 
                self.kv(indent).with_id(PREFIX_IMPORT_ID, import.this.index)
 
                    .with_s_key("ImportSymbol");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&import.module_name);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(import.module_id.as_ref().map(|v| &v.index));
 

	
 
                self.kv(indent2).with_s_key("Symbols");
 

	
 
                let indent3 = indent2 + 1;
 
                let indent4 = indent3 + 1;
 
                for symbol in &import.symbols {
 
                    self.kv(indent3).with_s_key("AliasedSymbol");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&symbol.name);
 
                    self.kv(indent4).with_s_key("Alias").with_ascii_val(&symbol.alias);
 
                    self.kv(indent4).with_s_key("Definition")
 
                        .with_opt_disp_val(symbol.definition_id.as_ref().map(|v| &v.index));
 
                }
 
            }
 
        }
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Top-level definition writing
 
    //--------------------------------------------------------------------------
 

	
 
    fn write_definition(&mut self, heap: &Heap, def_id: DefinitionId, indent: usize) {
 
        self.cur_definition = Some(def_id);
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 
        let indent4 = indent3 + 1;
 

	
 
        match &heap[def_id] {
 
            Definition::Struct(_) => todo!("implement Definition::Struct"),
 
            Definition::Enum(_) => todo!("implement Definition::Enum"),
 
            Definition::Function(def) => {
 
                self.kv(indent).with_id(PREFIX_FUNCTION_ID, def.this.0.index)
 
                    .with_s_key("DefinitionFunction");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&def.identifier.value);
 
                for poly_var_id in &def.poly_vars {
 
                    self.kv(indent3).with_s_key("PolyVar");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&poly_var_id.value);
 
                }
 

	
 
                self.kv(indent2).with_s_key("ReturnParserType").with_custom_val(|s| write_parser_type(s, heap, &heap[def.return_type]));
 

	
 
                self.kv(indent2).with_s_key("Parameters");
 
                for param_id in &def.parameters {
 
                    self.write_parameter(heap, *param_id, indent3);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, def.body, indent3);
 
            },
 
            Definition::Component(def) => {
 
                self.kv(indent).with_id(PREFIX_COMPONENT_ID,def.this.0.index)
 
                    .with_s_key("DefinitionComponent");
 

	
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&def.identifier.value);
 
                self.kv(indent2).with_s_key("Variant").with_debug_val(&def.variant);
 

	
 
                self.kv(indent2).with_s_key("PolymorphicVariables");
 
                for poly_var_id in &def.poly_vars {
 
                    self.kv(indent3).with_s_key("PolyVar");
 
                    self.kv(indent4).with_s_key("Name").with_ascii_val(&poly_var_id.value);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parameters");
 
                for param_id in &def.parameters {
 
                    self.write_parameter(heap, *param_id, indent3)
 
                }
 

	
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, def.body, indent3);
 
            }
 
        }
 
    }
 

	
 
    fn write_parameter(&mut self, heap: &Heap, param_id: ParameterId, indent: usize) {
 
        let indent2 = indent + 1;
 
        let param = &heap[param_id];
 

	
 
        self.kv(indent).with_id(PREFIX_PARAMETER_ID, param_id.0.index)
 
            .with_s_key("Parameter");
 
        self.kv(indent2).with_s_key("Name").with_ascii_val(&param.identifier.value);
 
        self.kv(indent2).with_s_key("ParserType").with_custom_val(|w| write_parser_type(w, heap, &heap[param.parser_type]));
 
    }
 

	
 
    fn write_stmt(&mut self, heap: &Heap, stmt_id: StatementId, indent: usize) {
 
        let stmt = &heap[stmt_id];
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 

	
 
        match stmt {
 
            Statement::Block(stmt) => {
 
                self.kv(indent).with_id(PREFIX_BLOCK_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Block");
 

	
 
                for stmt_id in &stmt.statements {
 
                    self.write_stmt(heap, *stmt_id, indent2);
 
                }
 
            },
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Channel(stmt) => {
 
                        self.kv(indent).with_id(PREFIX_CHANNEL_STMT_ID, stmt.this.0.0.index)
 
                            .with_s_key("LocalChannel");
 

	
 
                        self.kv(indent2).with_s_key("From");
 
                        self.write_local(heap, stmt.from, indent3);
 
                        self.kv(indent2).with_s_key("To");
 
                        self.write_local(heap, stmt.to, indent3);
 
                        self.kv(indent2).with_s_key("Next")
 
                            .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
                    },
 
                    LocalStatement::Memory(stmt) => {
 
                        self.kv(indent).with_id(PREFIX_MEM_STMT_ID, stmt.this.0.0.index)
 
                            .with_s_key("LocalMemory");
 

	
 
                        self.kv(indent2).with_s_key("Variable");
 
                        self.write_local(heap, stmt.variable, indent3);
 
                        self.kv(indent2).with_s_key("Next")
 
                            .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
                    }
 
                }
 
            },
 
            Statement::Skip(stmt) => {
 
                self.kv(indent).with_id(PREFIX_SKIP_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Skip");
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Labeled(stmt) => {
 
                self.kv(indent).with_id(PREFIX_LABELED_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Labeled");
 

	
 
                self.kv(indent2).with_s_key("Label").with_ascii_val(&stmt.label.value);
 
                self.kv(indent2).with_s_key("Statement");
 
                self.write_stmt(heap, stmt.body, indent3);
 
            },
 
            Statement::If(stmt) => {
 
                self.kv(indent).with_id(PREFIX_IF_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("If");
 

	
 
                self.kv(indent2).with_s_key("EndIf")
 
                    .with_opt_disp_val(stmt.end_if.as_ref().map(|v| &v.0.index));
 

	
 
                self.kv(indent2).with_s_key("Condition");
 
                self.write_expr(heap, stmt.test, indent3);
 

	
 
                self.kv(indent2).with_s_key("TrueBody");
 
                self.write_stmt(heap, stmt.true_body, indent3);
 

	
 
                self.kv(indent2).with_s_key("FalseBody");
 
                self.write_stmt(heap, stmt.false_body, indent3);
 
            },
 
            Statement::EndIf(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ENDIF_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("EndIf");
 
                self.kv(indent2).with_s_key("StartIf").with_disp_val(&stmt.start_if.0.index);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::While(stmt) => {
 
                self.kv(indent).with_id(PREFIX_WHILE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("While");
 

	
 
                self.kv(indent2).with_s_key("EndWhile")
 
                    .with_opt_disp_val(stmt.end_while.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("InSync")
 
                    .with_opt_disp_val(stmt.in_sync.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("Condition");
 
                self.write_expr(heap, stmt.test, indent3);
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, stmt.body, indent3);
 
            },
 
            Statement::EndWhile(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ENDWHILE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("EndWhile");
 
                self.kv(indent2).with_s_key("StartWhile").with_disp_val(&stmt.start_while.0.index);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Break(stmt) => {
 
                self.kv(indent).with_id(PREFIX_BREAK_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Break");
 
                self.kv(indent2).with_s_key("Label")
 
                    .with_opt_ascii_val(stmt.label.as_ref().map(|v| v.value.as_slice()));
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::Continue(stmt) => {
 
                self.kv(indent).with_id(PREFIX_CONTINUE_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Continue");
 
                self.kv(indent2).with_s_key("Label")
 
                    .with_opt_ascii_val(stmt.label.as_ref().map(|v| v.value.as_slice()));
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::Synchronous(stmt) => {
 
                self.kv(indent).with_id(PREFIX_SYNC_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Synchronous");
 
                self.kv(indent2).with_s_key("EndSync")
 
                    .with_opt_disp_val(stmt.end_sync.as_ref().map(|v| &v.0.index));
 
                self.kv(indent2).with_s_key("Body");
 
                self.write_stmt(heap, stmt.body, indent3);
 
            },
 
            Statement::EndSynchronous(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ENDSYNC_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("EndSynchronous");
 
                self.kv(indent2).with_s_key("StartSync").with_disp_val(&stmt.start_sync.0.index);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Return(stmt) => {
 
                self.kv(indent).with_id(PREFIX_RETURN_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Return");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression, indent3);
 
            },
 
            Statement::Assert(stmt) => {
 
                self.kv(indent).with_id(PREFIX_ASSERT_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Assert");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression, indent3);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Goto(stmt) => {
 
                self.kv(indent).with_id(PREFIX_GOTO_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("Goto");
 
                self.kv(indent2).with_s_key("Label").with_ascii_val(&stmt.label.value);
 
                self.kv(indent2).with_s_key("Target")
 
                    .with_opt_disp_val(stmt.target.as_ref().map(|v| &v.0.index));
 
            },
 
            Statement::New(stmt) => {
 
                self.kv(indent).with_id(PREFIX_NEW_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("New");
 
                self.kv(indent2).with_s_key("Expression");
 
                self.write_expr(heap, stmt.expression.upcast(), indent3);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            },
 
            Statement::Expression(stmt) => {
 
                self.kv(indent).with_id(PREFIX_EXPR_STMT_ID, stmt.this.0.index)
 
                    .with_s_key("ExpressionStatement");
 
                self.write_expr(heap, stmt.expression, indent2);
 
                self.kv(indent2).with_s_key("Next")
 
                    .with_opt_disp_val(stmt.next.as_ref().map(|v| &v.index));
 
            }
 
        }
 
    }
 

	
 
    fn write_expr(&mut self, heap: &Heap, expr_id: ExpressionId, indent: usize) {
 
        let expr = &heap[expr_id];
 
        let indent2 = indent + 1;
 
        let indent3 = indent2 + 1;
 
        let def_id = self.cur_definition.unwrap();
 

	
 
        match expr {
 
            Expression::Assignment(expr) => {
 
                self.kv(indent).with_id(PREFIX_ASSIGNMENT_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("AssignmentExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Left");
 
                self.write_expr(heap, expr.left, indent3);
 
                self.kv(indent2).with_s_key("Right");
 
                self.write_expr(heap, expr.right, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Conditional(expr) => {
 
                self.kv(indent).with_id(PREFIX_CONDITIONAL_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ConditionalExpr");
 
                self.kv(indent2).with_s_key("Condition");
 
                self.write_expr(heap, expr.test, indent3);
 
                self.kv(indent2).with_s_key("TrueExpression");
 
                self.write_expr(heap, expr.true_expression, indent3);
 
                self.kv(indent2).with_s_key("FalseExpression");
 
                self.write_expr(heap, expr.false_expression, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Binary(expr) => {
 
                self.kv(indent).with_id(PREFIX_BINARY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("BinaryExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Left");
 
                self.write_expr(heap, expr.left, indent3);
 
                self.kv(indent2).with_s_key("Right");
 
                self.write_expr(heap, expr.right, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Unary(expr) => {
 
                self.kv(indent).with_id(PREFIX_UNARY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("UnaryExpr");
 
                self.kv(indent2).with_s_key("Operation").with_debug_val(&expr.operation);
 
                self.kv(indent2).with_s_key("Argument");
 
                self.write_expr(heap, expr.expression, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Indexing(expr) => {
 
                self.kv(indent).with_id(PREFIX_INDEXING_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("IndexingExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 
                self.kv(indent2).with_s_key("Index");
 
                self.write_expr(heap, expr.index, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Slicing(expr) => {
 
                self.kv(indent).with_id(PREFIX_SLICING_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("SlicingExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 
                self.kv(indent2).with_s_key("FromIndex");
 
                self.write_expr(heap, expr.from_index, indent3);
 
                self.kv(indent2).with_s_key("ToIndex");
 
                self.write_expr(heap, expr.to_index, indent3);
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Select(expr) => {
 
                self.kv(indent).with_id(PREFIX_SELECT_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("SelectExpr");
 
                self.kv(indent2).with_s_key("Subject");
 
                self.write_expr(heap, expr.subject, indent3);
 

	
 
                match &expr.field {
 
                    Field::Length => {
 
                        self.kv(indent2).with_s_key("Field").with_s_val("length");
 
                    },
 
                    Field::Symbolic(field) => {
 
                        self.kv(indent2).with_s_key("Field").with_ascii_val(&field.value);
 
                    }
 
                }
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Array(expr) => {
 
                self.kv(indent).with_id(PREFIX_ARRAY_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ArrayExpr");
 
                self.kv(indent2).with_s_key("Elements");
 
                for expr_id in &expr.elements {
 
                    self.write_expr(heap, *expr_id, indent3);
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Constant(expr) => {
 
                self.kv(indent).with_id(PREFIX_CONST_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("ConstantExpr");
 

	
 
                let val = self.kv(indent2).with_s_key("Value");
 
                match &expr.value {
 
                    Constant::Null => { val.with_s_val("null"); },
 
                    Constant::True => { val.with_s_val("true"); },
 
                    Constant::False => { val.with_s_val("false"); },
 
                    Constant::Character(char) => { val.with_ascii_val(char); },
 
                    Constant::Integer(int) => { val.with_disp_val(int); },
 
                }
 

	
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Call(expr) => {
 
                self.kv(indent).with_id(PREFIX_CALL_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("CallExpr");
 

	
 
                // Method
 
                let method = self.kv(indent2).with_s_key("Method");
 
                match &expr.method {
 
                    Method::Get => { method.with_s_val("get"); },
 
                    Method::Put => { method.with_s_val("put"); },
 
                    Method::Fires => { method.with_s_val("fires"); },
 
                    Method::Create => { method.with_s_val("create"); },
 
                    Method::Symbolic(symbolic) => {
 
                        method.with_s_val("symbolic");
 
                        self.kv(indent3).with_s_key("Name").with_ascii_val(&symbolic.identifier.value);
 
                        self.kv(indent3).with_s_key("Definition")
 
                            .with_opt_disp_val(symbolic.definition.as_ref().map(|v| &v.index));
 
                    }
 
                }
 

	
 
                // Arguments
 
                self.kv(indent2).with_s_key("Arguments");
 
                for arg_id in &expr.arguments {
 
                    self.write_expr(heap, *arg_id, indent3);
 
                }
 

	
 
                // Parent
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            },
 
            Expression::Variable(expr) => {
 
                self.kv(indent).with_id(PREFIX_VARIABLE_EXPR_ID, expr.this.0.index)
 
                    .with_s_key("VariableExpr");
 
                self.kv(indent2).with_s_key("Name").with_ascii_val(&expr.identifier.value);
 
                self.kv(indent2).with_s_key("Definition")
 
                    .with_opt_disp_val(expr.declaration.as_ref().map(|v| &v.index));
 
                self.kv(indent2).with_s_key("Parent")
 
                    .with_custom_val(|v| write_expression_parent(v, &expr.parent));
 
                self.kv(indent2).with_s_key("ConcreteType")
 
-                    .with_custom_val(|v| write_concrete_type(v, heap, &expr.concrete_type));
+                    .with_custom_val(|v| write_concrete_type(v, heap, def_id, &expr.concrete_type));
 
            }
 
        }
 
    }
 

	
 
    fn write_local(&mut self, heap: &Heap, local_id: LocalId, indent: usize) {
 
        let local = &heap[local_id];
 
        let indent2 = indent + 1;
 

	
 
        self.kv(indent).with_id(PREFIX_LOCAL_ID, local_id.0.index)
 
            .with_s_key("Local");
 

	
 
        self.kv(indent2).with_s_key("Name").with_ascii_val(&local.identifier.value);
 
        self.kv(indent2).with_s_key("ParserType")
 
            .with_custom_val(|w| write_parser_type(w, heap, &heap[local.parser_type]));
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Printing Utilities
 
    //--------------------------------------------------------------------------
 

	
 
    fn kv(&mut self, indent: usize) -> KV {
 
        KV::new(&mut self.buffer, &mut self.temp1, &mut self.temp2, indent)
 
    }
 

	
 
    fn flush<W: IOWrite>(&mut self, w: &mut W) {
 
        w.write(self.buffer.as_bytes()).unwrap();
 
        self.buffer.clear()
 
    }
 
}
 

	
 
fn write_option<V: Display>(target: &mut String, value: Option<V>) {
 
    target.clear();
 
    match &value {
 
        Some(v) => target.push_str(&format!("Some({})", v)),
 
        None => target.push_str("None")
 
    };
 
}
 

	
 
fn write_parser_type(target: &mut String, heap: &Heap, t: &ParserType) {
 
    use ParserTypeVariant as PTV;
 

	
 
    let mut embedded = Vec::new();
 
    match &t.variant {
 
        PTV::Input(id) => { target.push_str("in"); embedded.push(*id); }
 
        PTV::Output(id) => { target.push_str("out"); embedded.push(*id) }
 
        PTV::Array(id) => { target.push_str("array"); embedded.push(*id) }
 
        PTV::Message => { target.push_str("msg"); }
 
        PTV::Bool => { target.push_str("bool"); }
 
        PTV::Byte => { target.push_str("byte"); }
 
        PTV::Short => { target.push_str("short"); }
 
        PTV::Int => { target.push_str("int"); }
 
        PTV::Long => { target.push_str("long"); }
 
        PTV::String => { target.push_str("str"); }
 
        PTV::IntegerLiteral => { target.push_str("int_lit"); }
 
        PTV::Inferred => { target.push_str("auto"); }
 
        PTV::Symbolic(symbolic) => {
 
            target.push_str(&String::from_utf8_lossy(&symbolic.identifier.value));
 
            match symbolic.variant {
 
                Some(SymbolicParserTypeVariant::PolyArg(def_id, idx)) => {
 
                    target.push_str(&format!("{{def: {}, idx: {}}}", def_id.index, idx));
 
                },
 
                Some(SymbolicParserTypeVariant::Definition(def_id)) => {
 
                    target.push_str(&format!("{{def: {}}}", def_id.index));
 
                },
 
                None => {
 
                    target.push_str("{None}");
 
                }
 
            }
 
            embedded.extend(&symbolic.poly_args);
 
        }
 
    };
 

	
 
    if !embedded.is_empty() {
 
        target.push_str("<");
 
        for (idx, embedded_id) in embedded.into_iter().enumerate() {
 
            if idx != 0 { target.push_str(", "); }
 
            write_parser_type(target, heap, &heap[embedded_id]);
 
        }
 
        target.push_str(">");
 
    }
 
}
 

	
 
-fn write_concrete_type(target: &mut String, heap: &Heap, t: &ConcreteType) {
+fn write_concrete_type(target: &mut String, heap: &Heap, def_id: DefinitionId, t: &ConcreteType) {
 
    use ConcreteTypePart as CTP;
 

	
 
-    fn write_concrete_part(target: &mut String, heap: &Heap, t: &ConcreteType, mut idx: usize) -> usize {
+    fn write_concrete_part(target: &mut String, heap: &Heap, def_id: DefinitionId, t: &ConcreteType, mut idx: usize) -> usize {
 
        if idx >= t.parts.len() {
 
            target.push_str("Programmer error: invalid concrete type tree");
 
            return idx;
 
        }
 

	
 
        match &t.parts[idx] {
 
            CTP::Marker(marker) => {
 
                // Marker points to polymorphic variable index
 
                let definition = &heap[def_id];
 
                let poly_var_ident = match definition {
 
                    Definition::Struct(_) | Definition::Enum(_) => unreachable!(),
 
                    Definition::Function(definition) => &definition.poly_vars[*marker].value,
 
                    Definition::Component(definition) => &definition.poly_vars[*marker].value,
 
                };
 
                target.push_str(&String::from_utf8_lossy(&poly_var_ident));
 
                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
            },
 
            CTP::Void => target.push_str("void"),
 
            CTP::Message => target.push_str("msg"),
 
            CTP::Bool => target.push_str("bool"),
 
            CTP::Byte => target.push_str("byte"),
 
            CTP::Short => target.push_str("short"),
 
            CTP::Int => target.push_str("int"),
 
            CTP::Long => target.push_str("long"),
 
            CTP::String => target.push_str("string"),
 
            CTP::Array => {
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push_str("[]");
 
            },
 
            CTP::Slice => {
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push_str("[..]");
 
            }
 
            CTP::Input => {
 
                target.push_str("in<");
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push('>');
 
            },
 
            CTP::Output => {
 
                target.push_str("out<");
 
-                idx = write_concrete_part(target, heap, t, idx + 1);
+                idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                target.push('>')
 
            },
 
            CTP::Instance(definition_id, num_embedded) => {
 
                let identifier = heap[*definition_id].identifier();
 
                target.push_str(&String::from_utf8_lossy(&identifier.value));
 
                target.push('<');
 
                for idx_embedded in 0..*num_embedded {
 
                    if idx_embedded != 0 {
 
                        target.push_str(", ");
 
                    }
 
                    idx = write_concrete_part(target, heap, t, idx + 1);
 
                    idx = write_concrete_part(target, heap, def_id, t, idx + 1);
 
                }
 
                target.push('>');
 
            }
 
        }
 

	
 
        idx + 1
 
    }
 

	
 
    write_concrete_part(target, heap, t, 0);
 
    write_concrete_part(target, heap, def_id, t, 0);
 
}
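// Editor's sketch (an assumption about the encoding, inferred from the arms
// above): `ConcreteType::parts` stores the type tree in pre-order, so an
// `int[]` would be encoded as `[CTP::Array, CTP::Int]` and a `Foo<byte, bool>`
// as `[CTP::Instance(foo_id, 2), CTP::Byte, CTP::Bool]`. Rendering then looks
// roughly like:
//
//     let mut rendered = String::new();
//     write_concrete_type(&mut rendered, &heap, def_id, &concrete_type);
//     // rendered == "Foo<byte, bool>"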
 

	
 
fn write_expression_parent(target: &mut String, parent: &ExpressionParent) {
 
    use ExpressionParent as EP;
 

	
 
    *target = match parent {
 
        EP::None => String::from("None"),
 
        EP::If(id) => format!("IfStmt({})", id.0.index),
 
        EP::While(id) => format!("WhileStmt({})", id.0.index),
 
        EP::Return(id) => format!("ReturnStmt({})", id.0.index),
 
        EP::Assert(id) => format!("AssertStmt({})", id.0.index),
 
        EP::New(id) => format!("NewStmt({})", id.0.index),
 
        EP::ExpressionStmt(id) => format!("ExprStmt({})", id.0.index),
 
        EP::Expression(id, idx) => format!("Expr({}, {})", id.index, idx)
 
    };
 
}
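// Editor's note: the produced strings mirror the `ExpressionParent` variants,
// e.g. an expression whose parent is an if-statement with AST index 12 is
// written as "IfStmt(12)", and a nested expression as "Expr(parent_index, arg_index)".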
 
\ No newline at end of file
src/protocol/eval.rs
Show inline comments
 
@@ -719,537 +719,553 @@ impl Value {
 
        }
 
    }
 
}
 

	
 
impl From<bool> for Value {
 
    fn from(b: bool) -> Self {
 
        Value::Boolean(BooleanValue(b))
 
    }
 
}
 
impl From<Value> for bool {
 
    fn from(val: Value) -> Self {
 
        match val {
 
            Value::Boolean(BooleanValue(b)) => b,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 
impl From<&Value> for bool {
 
    fn from(val: &Value) -> Self {
 
        match val {
 
            Value::Boolean(BooleanValue(b)) => *b,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 

	
 
impl From<Value> for i8 {
 
    fn from(val: Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => b,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 
impl From<&Value> for i8 {
 
    fn from(val: &Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => *b,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 

	
 
impl From<Value> for i16 {
 
    fn from(val: Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i16::from(b),
 
            Value::Short(ShortValue(s)) => s,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 
impl From<&Value> for i16 {
 
    fn from(val: &Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i16::from(*b),
 
            Value::Short(ShortValue(s)) => *s,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 

	
 
impl From<Value> for i32 {
 
    fn from(val: Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i32::from(b),
 
            Value::Short(ShortValue(s)) => i32::from(s),
 
            Value::Int(IntValue(i)) => i,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 
impl From<&Value> for i32 {
 
    fn from(val: &Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i32::from(*b),
 
            Value::Short(ShortValue(s)) => i32::from(*s),
 
            Value::Int(IntValue(i)) => *i,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 

	
 
impl From<Value> for i64 {
 
    fn from(val: Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i64::from(b),
 
            Value::Short(ShortValue(s)) => i64::from(s),
 
            Value::Int(IntValue(i)) => i64::from(i),
 
            Value::Long(LongValue(l)) => l,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
 
impl From<&Value> for i64 {
 
    fn from(val: &Value) -> Self {
 
        match val {
 
            Value::Byte(ByteValue(b)) => i64::from(*b),
 
            Value::Short(ShortValue(s)) => i64::from(*s),
 
            Value::Int(IntValue(i)) => i64::from(*i),
 
            Value::Long(LongValue(l)) => *l,
 
            _ => unimplemented!(),
 
        }
 
    }
 
}
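// Editor's sketch (not part of the original changeset; only usable inside this
// module because the tuple fields are private): the conversions above are
// widening-only, so converting a wider value into a narrower integer falls
// through to the `unimplemented!()` arm.
//
//     let v = Value::Byte(ByteValue(3));
//     let widened: i64 = i64::from(&v);          // ok: byte widens to long
//     // i8::from(&Value::Long(LongValue(3)));   // would panic via unimplemented!()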
 

	
 
impl ValueImpl for Value {
 
    fn exact_type(&self) -> Type {
 
        match self {
 
            Value::Unassigned => Type::UNASSIGNED,
 
            Value::Input(val) => val.exact_type(),
 
            Value::Output(val) => val.exact_type(),
 
            Value::Message(val) => val.exact_type(),
 
            Value::Boolean(val) => val.exact_type(),
 
            Value::Byte(val) => val.exact_type(),
 
            Value::Short(val) => val.exact_type(),
 
            Value::Int(val) => val.exact_type(),
 
            Value::Long(val) => val.exact_type(),
 
            Value::InputArray(val) => val.exact_type(),
 
            Value::OutputArray(val) => val.exact_type(),
 
            Value::MessageArray(val) => val.exact_type(),
 
            Value::BooleanArray(val) => val.exact_type(),
 
            Value::ByteArray(val) => val.exact_type(),
 
            Value::ShortArray(val) => val.exact_type(),
 
            Value::IntArray(val) => val.exact_type(),
 
            Value::LongArray(val) => val.exact_type(),
 
        }
 
    }
 
    fn is_type_compatible(&self, h: &Heap, t: &ParserType) -> bool {
 
        match self {
 
            Value::Unassigned => true,
 
            Value::Input(_) => InputValue::is_type_compatible_hack(h, t),
 
            Value::Output(_) => OutputValue::is_type_compatible_hack(h, t),
 
            Value::Message(_) => MessageValue::is_type_compatible_hack(h, t),
 
            Value::Boolean(_) => BooleanValue::is_type_compatible_hack(h, t),
 
            Value::Byte(_) => ByteValue::is_type_compatible_hack(h, t),
 
            Value::Short(_) => ShortValue::is_type_compatible_hack(h, t),
 
            Value::Int(_) => IntValue::is_type_compatible_hack(h, t),
 
            Value::Long(_) => LongValue::is_type_compatible_hack(h, t),
 
            Value::InputArray(_) => InputArrayValue::is_type_compatible_hack(h, t),
 
            Value::OutputArray(_) => OutputArrayValue::is_type_compatible_hack(h, t),
 
            Value::MessageArray(_) => MessageArrayValue::is_type_compatible_hack(h, t),
 
            Value::BooleanArray(_) => BooleanArrayValue::is_type_compatible_hack(h, t),
 
            Value::ByteArray(_) => ByteArrayValue::is_type_compatible_hack(h, t),
 
            Value::ShortArray(_) => ShortArrayValue::is_type_compatible_hack(h, t),
 
            Value::IntArray(_) => IntArrayValue::is_type_compatible_hack(h, t),
 
            Value::LongArray(_) => LongArrayValue::is_type_compatible_hack(h, t),
 
        }
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, _t: &ParserType) -> bool { false }
 
}
 

	
 
impl Display for Value {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        let disp: &dyn Display;
 
        match self {
 
            Value::Unassigned => disp = &Type::UNASSIGNED,
 
            Value::Input(val) => disp = val,
 
            Value::Output(val) => disp = val,
 
            Value::Message(val) => disp = val,
 
            Value::Boolean(val) => disp = val,
 
            Value::Byte(val) => disp = val,
 
            Value::Short(val) => disp = val,
 
            Value::Int(val) => disp = val,
 
            Value::Long(val) => disp = val,
 
            Value::InputArray(val) => disp = val,
 
            Value::OutputArray(val) => disp = val,
 
            Value::MessageArray(val) => disp = val,
 
            Value::BooleanArray(val) => disp = val,
 
            Value::ByteArray(val) => disp = val,
 
            Value::ShortArray(val) => disp = val,
 
            Value::IntArray(val) => disp = val,
 
            Value::LongArray(val) => disp = val,
 
        }
 
        disp.fmt(f)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct InputValue(pub PortId);
 

	
 
impl Display for InputValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "#in")
 
    }
 
}
 

	
 
impl ValueImpl for InputValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INPUT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        return if let ParserTypeVariant::Input(_) = t.variant { true } else { false }
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Input(_) | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct OutputValue(pub PortId);
 

	
 
impl Display for OutputValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "#out")
 
    }
 
}
 

	
 
impl ValueImpl for OutputValue {
 
    fn exact_type(&self) -> Type {
 
        Type::OUTPUT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        return if let ParserTypeVariant::Output(_) = t.variant { true } else { false }
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Output(_) | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct MessageValue(pub Option<Payload>);
 

	
 
impl Display for MessageValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        match &self.0 {
 
            None => write!(f, "null"),
 
            Some(payload) => {
 
                // Format at most the first 10 bytes of the payload
 
                let mut slice = payload.as_slice();
 
                if slice.len() > 10 {
 
                    slice = &slice[..10];
 
                }
 
                f.debug_list().entries(slice.iter().copied()).finish()
 
            }
 
        }
 
    }
 
}
 

	
 
impl ValueImpl for MessageValue {
 
    fn exact_type(&self) -> Type {
 
        Type::MESSAGE
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        return if let ParserTypeVariant::Message = t.variant { true } else { false };
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Message | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct BooleanValue(bool);
 

	
 
impl Display for BooleanValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for BooleanValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BOOLEAN
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Bool | Byte | Short | Int | Long => true,
 
            Symbolic(_) | Inferred | Bool | Byte | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ByteValue(i8);
 

	
 
impl Display for ByteValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for ByteValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BYTE
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Byte | Short | Int | Long => true,
 
            Symbolic(_) | Inferred | Byte | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ShortValue(i16);
 

	
 
impl Display for ShortValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for ShortValue {
 
    fn exact_type(&self) -> Type {
 
        Type::SHORT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Short | Int | Long => true,
 
            Symbolic(_) | Inferred | Short | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct IntValue(i32);
 

	
 
impl Display for IntValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for IntValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INT
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        use ParserTypeVariant::*;
 
        match t.variant {
 
            Int | Long => true,
 
            Symbolic(_) | Inferred | Int | Long => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct LongValue(i64);
 

	
 
impl Display for LongValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.0)
 
    }
 
}
 

	
 
impl ValueImpl for LongValue {
 
    fn exact_type(&self) -> Type {
 
        Type::LONG
 
    }
 
    fn is_type_compatible_hack(_h: &Heap, t: &ParserType) -> bool {
 
        return if let ParserTypeVariant::Long = t.variant { true } else { false }
 
        use ParserTypeVariant::*;
 
        match &t.variant {
 
            Long | Inferred | Symbolic(_) => true,
 
            _ => false,
 
        }
 
    }
 
}
 

	
 
fn get_array_inner(t: &ParserType) -> Option<ParserTypeId> {
 
    match t.variant {
 
        ParserTypeVariant::Array(inner) => Some(inner),
 
        _ => None
 
    }
 
}
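// Editor's note: the `...ArrayValue::is_type_compatible_hack` implementations
// below use this helper to unwrap `ParserTypeVariant::Array(inner)` and then
// defer to the element type's own compatibility check, so a non-array parser
// type is never considered compatible with an array value.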
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct InputArrayValue(Vec<InputValue>);
 

	
 
impl Display for InputArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for InputArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::INPUT_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| InputValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct OutputArrayValue(Vec<OutputValue>);
 

	
 
impl Display for OutputArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for OutputArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::OUTPUT_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| OutputValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct MessageArrayValue(Vec<MessageValue>);
 

	
 
impl Display for MessageArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for MessageArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::MESSAGE_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| MessageValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct BooleanArrayValue(Vec<BooleanValue>);
 

	
 
impl Display for BooleanArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for BooleanArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BOOLEAN_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| BooleanValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ByteArrayValue(Vec<ByteValue>);
 

	
 
impl Display for ByteArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for ByteArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::BYTE_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| ByteValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct ShortArrayValue(Vec<ShortValue>);
 

	
 
impl Display for ShortArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
            if !first {
 
                write!(f, ",")?;
 
            }
 
            write!(f, "{}", v)?;
 
            first = false;
 
        }
 
        write!(f, "}}")
 
    }
 
}
 

	
 
impl ValueImpl for ShortArrayValue {
 
    fn exact_type(&self) -> Type {
 
        Type::SHORT_ARRAY
 
    }
 
    fn is_type_compatible_hack(h: &Heap, t: &ParserType) -> bool {
 
        get_array_inner(t)
 
            .map(|v| ShortValue::is_type_compatible_hack(h, &h[v]))
 
            .unwrap_or(false)
 
    }
 
}
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub struct IntArrayValue(Vec<IntValue>);
 

	
 
impl Display for IntArrayValue {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{{")?;
 
        let mut first = true;
 
        for v in self.0.iter() {
 
@@ -1326,391 +1342,388 @@ impl Store {
 
        h: &Heap,
 
        ctx: &mut EvalContext,
 
        lexpr: ExpressionId,
 
        value: Value,
 
    ) -> EvalResult {
 
        match &h[lexpr] {
 
            Expression::Variable(var) => {
 
                let var = var.declaration.unwrap();
 
                // Ensure value is compatible with type of variable
 
                let parser_type_id = match &h[var] {
 
                    Variable::Local(v) => v.parser_type,
 
                    Variable::Parameter(v) => v.parser_type
 
                };
 
                let parser_type = &h[parser_type_id];
 
                assert!(value.is_type_compatible(h, parser_type));
 
                // Overwrite mapping
 
                self.map.insert(var, value.clone());
 
                Ok(value)
 
            }
 
            Expression::Indexing(indexing) => {
 
                // Evaluate index expression, which must be some integral type
 
                let index = self.eval(h, ctx, indexing.index)?;
 
                // Mutable reference to the subject
 
                let subject;
 
                match &h[indexing.subject] {
 
                    Expression::Variable(var) => {
 
                        let var = var.declaration.unwrap();
 
                        subject = self.map.get_mut(&var).unwrap();
 
                    }
 
                    _ => unreachable!(),
 
                }
 
                match subject.set(&index, &value) {
 
                    Some(value) => Ok(value),
 
                    None => Err(EvalContinuation::Inconsistent),
 
                }
 
            }
 
            _ => unimplemented!("{:?}", h[lexpr]),
 
        }
 
    }
 
    fn get(&mut self, h: &Heap, ctx: &mut EvalContext, rexpr: ExpressionId) -> EvalResult {
 
        match &h[rexpr] {
 
            Expression::Variable(var) => {
 
                let var_id = var.declaration.unwrap();
 
                let value = self
 
                    .map
 
                    .get(&var_id)
 
                    .expect(&format!("Uninitialized variable {:?}", String::from_utf8_lossy(&var.identifier.value)));
 
                Ok(value.clone())
 
            }
 
            Expression::Indexing(indexing) => {
 
                // Evaluate index expression, which must be some integral type
 
                let index = self.eval(h, ctx, indexing.index)?;
 
                // Reference to subject
 
                let subject;
 
                match &h[indexing.subject] {
 
                    Expression::Variable(var) => {
 
                        let var = var.declaration.unwrap();
 
                        subject = self.map.get(&var).unwrap();
 
                    }
 
                    q => unreachable!("Reached {:?}", q),
 
                }
 
                match subject.get(&index) {
 
                    Some(value) => Ok(value),
 
                    None => Err(EvalContinuation::Inconsistent),
 
                }
 
            }
 
            Expression::Select(selecting) => {
 
                // Reference to subject
 
                let subject;
 
                match &h[selecting.subject] {
 
                    Expression::Variable(var) => {
 
                        let var = var.declaration.unwrap();
 
                        subject = self.map.get(&var).unwrap();
 
                    }
 
                    q => unreachable!("Reached {:?}", q),
 
                }
 
                match subject.length() {
 
                    Some(value) => Ok(value),
 
                    None => Err(EvalContinuation::Inconsistent),
 
                }
 
            }
 
            _ => unimplemented!("{:?}", h[rexpr]),
 
        }
 
    }
 
    fn eval(&mut self, h: &Heap, ctx: &mut EvalContext, expr: ExpressionId) -> EvalResult {
 
        match &h[expr] {
 
            Expression::Assignment(expr) => {
 
                let value = self.eval(h, ctx, expr.right)?;
 
                match expr.operation {
 
                    AssignmentOperator::Set => {
 
                        self.update(h, ctx, expr.left, value.clone())?;
 
                    }
 
                    AssignmentOperator::Added => {
 
                        let old = self.get(h, ctx, expr.left)?;
 
                        self.update(h, ctx, expr.left, old.plus(&value))?;
 
                    }
 
                    AssignmentOperator::Subtracted => {
 
                        let old = self.get(h, ctx, expr.left)?;
 
                        self.update(h, ctx, expr.left, old.minus(&value))?;
 
                    }
 
                    _ => unimplemented!("{:?}", expr),
 
                }
 
                Ok(value)
 
            }
 
            Expression::Conditional(expr) => {
 
                let test = self.eval(h, ctx, expr.test)?;
 
                if test.as_boolean().0 {
 
                    self.eval(h, ctx, expr.true_expression)
 
                } else {
 
                    self.eval(h, ctx, expr.false_expression)
 
                }
 
            }
 
            Expression::Binary(expr) => {
 
                let left = self.eval(h, ctx, expr.left)?;
 
                let right;
 
                match expr.operation {
 
                    BinaryOperator::LogicalAnd => {
 
                        if left.as_boolean().0 == false {
 
                            return Ok(left);
 
                        }
 
                        right = self.eval(h, ctx, expr.right)?;
 
                        right.as_boolean(); // panics if not a boolean
 
                        return Ok(right);
 
                    }
 
                    BinaryOperator::LogicalOr => {
 
                        if left.as_boolean().0 == true {
 
                            return Ok(left);
 
                        }
 
                        right = self.eval(h, ctx, expr.right)?;
 
                        right.as_boolean(); // panics if not a boolean
 
                        return Ok(right);
 
                    }
 
                    _ => {}
 
                }
 
                right = self.eval(h, ctx, expr.right)?;
 
                match expr.operation {
 
                    BinaryOperator::Equality => Ok(left.eq(&right)),
 
                    BinaryOperator::Inequality => Ok(left.neq(&right)),
 
                    BinaryOperator::LessThan => Ok(left.lt(&right)),
 
                    BinaryOperator::LessThanEqual => Ok(left.lte(&right)),
 
                    BinaryOperator::GreaterThan => Ok(left.gt(&right)),
 
                    BinaryOperator::GreaterThanEqual => Ok(left.gte(&right)),
 
                    BinaryOperator::Remainder => Ok(left.modulus(&right)),
 
                    BinaryOperator::Add => Ok(left.plus(&right)),
 
                    _ => unimplemented!("{:?}", expr.operation),
 
                }
 
            }
 
            Expression::Unary(expr) => {
 
                let mut value = self.eval(h, ctx, expr.expression)?;
 
                match expr.operation {
 
                    UnaryOperation::PostIncrement => {
 
                        self.update(h, ctx, expr.expression, value.plus(&ONE))?;
 
                    }
 
                    UnaryOperation::PreIncrement => {
 
                        value = value.plus(&ONE);
 
                        self.update(h, ctx, expr.expression, value.clone())?;
 
                    }
 
                    UnaryOperation::PostDecrement => {
 
                        self.update(h, ctx, expr.expression, value.minus(&ONE))?;
 
                    }
 
                    UnaryOperation::PreDecrement => {
 
                        value = value.minus(&ONE);
 
                        self.update(h, ctx, expr.expression, value.clone())?;
 
                    }
 
                    _ => unimplemented!(),
 
                }
 
                Ok(value)
 
            }
 
            Expression::Indexing(expr) => self.get(h, ctx, expr.this.upcast()),
 
            Expression::Slicing(_expr) => unimplemented!(),
 
            Expression::Select(expr) => self.get(h, ctx, expr.this.upcast()),
 
            Expression::Array(expr) => {
 
                let mut elements = Vec::new();
 
                for &elem in expr.elements.iter() {
 
                    elements.push(self.eval(h, ctx, elem)?);
 
                }
 
                todo!()
 
            }
 
            Expression::Constant(expr) => Ok(Value::from_constant(&expr.value)),
 
            Expression::Call(expr) => match &expr.method {
 
                Method::Get => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let value = self.eval(h, ctx, expr.arguments[0])?;
 
                    match ctx.get(value.clone()) {
 
                        None => Err(EvalContinuation::BlockGet(value)),
 
                        Some(result) => Ok(result),
 
                    }
 
                }
 
                Method::Put => {
 
                    assert_eq!(2, expr.arguments.len());
 
                    let port_value = self.eval(h, ctx, expr.arguments[0])?;
 
                    let msg_value = self.eval(h, ctx, expr.arguments[1])?;
 
                    println!("DEBUG: Handiling put({:?}, {:?})", port_value, msg_value);
 
                    if ctx.did_put(port_value.clone()) {
 
                        println!("DEBUG: Already put...");
 
                        // Return a bogus value; this gets replaced at some point anyway
 
                        Ok(Value::Message(MessageValue(None)))
 
                    } else {
 
                        println!("DEBUG: Did not yet put...");
 
                        Err(EvalContinuation::Put(port_value, msg_value))
 
                    }
 
                }
 
                Method::Fires => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let value = self.eval(h, ctx, expr.arguments[0])?;
 
                    match ctx.fires(value.clone()) {
 
                        None => Err(EvalContinuation::BlockFires(value)),
 
                        Some(result) => Ok(result),
 
                    }
 
                }
 
                Method::Create => {
 
                    assert_eq!(1, expr.arguments.len());
 
                    let length = self.eval(h, ctx, expr.arguments[0])?;
 
                    Ok(Value::create_message(length))
 
                }
 
                Method::Symbolic(_symbol) => unimplemented!(),
 
            },
 
            Expression::Variable(expr) => self.get(h, ctx, expr.this.upcast()),
 
        }
 
    }
 
}
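// Editor's note: `eval` short-circuits the logical operators above: for
// `LogicalAnd` the right-hand side is only evaluated when the left-hand side
// is true, and for `LogicalOr` only when it is false; in both cases the result
// is still checked to be a boolean via `as_boolean()`.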
 

	
 
type EvalResult = Result<Value, EvalContinuation>;
 
pub enum EvalContinuation {
 
    Stepping,
 
    Inconsistent,
 
    Terminal,
 
    SyncBlockStart,
 
    SyncBlockEnd,
 
    NewComponent(DefinitionId, Vec<Value>),
 
    BlockFires(Value),
 
    BlockGet(Value),
 
    Put(Value, Value),
 
}
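// Editor's sketch (an assumption about the surrounding runtime, not part of
// the original changeset): a driver would repeatedly call `Prompt::step` and
// dispatch on the continuation it returns, roughly:
//
//     loop {
//         match prompt.step(&heap, &mut ctx) {
//             Ok(value) => break value,                     // definition returned a value
//             Err(EvalContinuation::Stepping) => continue,  // take another step
//             Err(EvalContinuation::Put(port, msg)) => { /* hand the message to the runtime */ }
//             Err(other) => { /* block, fork a component, or report inconsistency */ }
//         }
//     }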
 

	
 
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 
pub(crate) struct Prompt {
 
    definition: DefinitionId,
 
    store: Store,
 
    position: Option<StatementId>,
 
}
 

	
 
impl Prompt {
 
    pub fn new(h: &Heap, def: DefinitionId, args: &Vec<Value>) -> Self {
 
        let mut prompt =
 
            Prompt { definition: def, store: Store::new(), position: Some((&h[def]).body()) };
 
        prompt.set_arguments(h, args);
 
        prompt
 
    }
 
    fn set_arguments(&mut self, h: &Heap, args: &Vec<Value>) {
 
        let def = &h[self.definition];
 
        let params = def.parameters();
 
        assert_eq!(params.len(), args.len());
 
        for (param, value) in params.iter().zip(args.iter()) {
 
            let hparam = &h[*param];
 
            let parser_type = &h[hparam.parser_type];
 
            assert!(value.is_type_compatible(h, parser_type));
 
            self.store.initialize(h, param.upcast(), value.clone());
 
        }
 
    }
 
    pub fn step(&mut self, h: &Heap, ctx: &mut EvalContext) -> EvalResult {
 
        if self.position.is_none() {
 
            return Err(EvalContinuation::Terminal);
 
        }
 

	
 
        let stmt = &h[self.position.unwrap()];
 
        match stmt {
 
            Statement::Block(stmt) => {
 
                // Continue to first statement
 
                self.position = Some(stmt.first());
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Memory(stmt) => {
 
                        // Update store
 
                        self.store.initialize(h, stmt.variable.upcast(), Value::Unassigned);
 
                    }
 
                    LocalStatement::Channel(stmt) => {
 
                        let [from, to] = ctx.new_channel();
 
                        // Store the values in the declared variables
 
                        self.store.initialize(h, stmt.from.upcast(), from);
 
                        self.store.initialize(h, stmt.to.upcast(), to);
 
                    }
 
                }
 
                // Continue to next statement
 
                self.position = stmt.next();
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Skip(stmt) => {
 
                // Continue to next statement
 
                self.position = stmt.next;
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Labeled(stmt) => {
 
                // Continue to next statement
 
                self.position = Some(stmt.body);
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::If(stmt) => {
 
                // Evaluate test
 
                let value = self.store.eval(h, ctx, stmt.test)?;
 
                // Continue with either branch
 
                if value.as_boolean().0 {
 
                    self.position = Some(stmt.true_body);
 
                } else {
 
                    self.position = Some(stmt.false_body);
 
                }
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::EndIf(stmt) => {
 
                // Continue to next statement
 
                self.position = stmt.next;
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::While(stmt) => {
 
                // Evaluate test
 
                let value = self.store.eval(h, ctx, stmt.test)?;
 
                // Either continue with body, or go to next
 
                if value.as_boolean().0 {
 
                    self.position = Some(stmt.body);
 
                } else {
 
                    self.position = stmt.end_while.map(|x| x.upcast());
 
                }
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::EndWhile(stmt) => {
 
                // Continue to next statement
 
                self.position = stmt.next;
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Synchronous(stmt) => {
 
                // Continue to next statement, and signal upward
 
                self.position = Some(stmt.body);
 
                Err(EvalContinuation::SyncBlockStart)
 
            }
 
            Statement::EndSynchronous(stmt) => {
 
                // Continue to next statement, and signal upward
 
                self.position = stmt.next;
 
                Err(EvalContinuation::SyncBlockEnd)
 
            }
 
            Statement::Break(stmt) => {
 
                // Continue to end of while
 
                self.position = stmt.target.map(EndWhileStatementId::upcast);
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Continue(stmt) => {
 
                // Continue to beginning of while
 
                self.position = stmt.target.map(WhileStatementId::upcast);
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::Assert(stmt) => {
 
                // Evaluate expression
 
                let value = self.store.eval(h, ctx, stmt.expression)?;
 
                if value.as_boolean().0 {
 
                    // Continue to next statement
 
                    self.position = stmt.next;
 
                    Err(EvalContinuation::Stepping)
 
                } else {
 
                    // Assertion failed: inconsistent
 
                    Err(EvalContinuation::Inconsistent)
 
                }
 
            }
 
            Statement::Return(stmt) => {
 
                // Evaluate expression
 
                let value = self.store.eval(h, ctx, stmt.expression)?;
 
                // Done with evaluation
 
                Ok(value)
 
            }
 
            Statement::Goto(stmt) => {
 
                // Continue to target
 
                self.position = stmt.target.map(|x| x.upcast());
 
                Err(EvalContinuation::Stepping)
 
            }
 
            Statement::New(stmt) => {
 
                let expr = &h[stmt.expression];
 
                let mut args = Vec::new();
 
                for &arg in expr.arguments.iter() {
 
                    let value = self.store.eval(h, ctx, arg)?;
 
                    args.push(value);
 
                }
 
                self.position = stmt.next;
 
                match &expr.method {
 
                    Method::Symbolic(symbolic) => {
 
                         Err(EvalContinuation::NewComponent(symbolic.definition.unwrap(), args))
 
                    },
 
                    _ => unreachable!("not a symbolic call expression")
 
                }
 
            }
 
            Statement::Expression(stmt) => {
 
                // Evaluate expression
src/protocol/lexer.rs
Show inline comments
 
@@ -2266,543 +2266,192 @@ impl Lexer<'_> {
 
            let mut value = Vec::new();
 
            let mut ident = self.consume_ident()?;
 
            value.append(&mut ident);
 
            while self.has_string(b".") {
 
                self.consume_string(b".")?;
 
                value.push(b'.');
 
                ident = self.consume_ident()?;
 
                value.append(&mut ident);
 
            }
 
            return Ok(h.alloc_pragma(|this| Pragma::Module(PragmaModule{
 
                this, position, value
 
            })));
 
        } else {
 
            return Err(self.error_at_pos("Unknown pragma"));
 
        }
 
    }
 

	
 
    fn has_import(&self) -> bool {
 
        self.has_keyword(b"import")
 
    }
 
    fn consume_import(&mut self, h: &mut Heap) -> Result<ImportId, ParseError2> {
 
        // Parse the word "import" and the name of the module
 
        let position = self.source.pos();
 
        self.consume_keyword(b"import")?;
 
        self.consume_whitespace(true)?;
 
        let mut value = Vec::new();
 
        let mut ident = self.consume_ident()?;
 
        value.append(&mut ident);
 
        let mut last_ident_start = 0;
 

	
 
        while self.has_string(b".") {
 
            self.consume_string(b".")?;
 
            value.push(b'.');
 
            ident = self.consume_ident()?;
 
            last_ident_start = value.len();
 
            value.append(&mut ident);
 
        }
 

	
 

	
 
        self.consume_whitespace(false)?;
 

	
 
        // Check for an explicit alias or for specific symbol imports
 
        let import = if self.has_string(b"as") {
 
            self.consume_string(b"as")?;
 
            self.consume_whitespace(true)?;
 
            let alias = self.consume_ident()?;
 

	
 
            h.alloc_import(|this| Import::Module(ImportModule{
 
                this,
 
                position,
 
                module_name: value,
 
                alias,
 
                module_id: None,
 
            }))
 
        } else if self.has_string(b"::") {
 
            self.consume_string(b"::")?;
 
            self.consume_whitespace(false)?;
 

	
 
            if let Some(b'{') = self.source.next() {
 
                // Import specific symbols, optionally with an alias
 
                self.source.consume();
 
                self.consume_whitespace(false)?;
 

	
 
                let mut symbols = Vec::new();
 
                let mut next = self.source.next();
 

	
 
                while next.is_some() {
 
                    let char = next.unwrap();
 
                    if char == b'}' {
 
                        break;
 
                    }
 

	
 
                    let symbol_position = self.source.pos();
 
                    let symbol_name = self.consume_ident()?;
 
                    self.consume_whitespace(false)?;
 
                    if self.has_string(b"as") {
 
                        // Symbol has an alias
 
                        self.consume_string(b"as")?;
 
                        self.consume_whitespace(true)?;
 
                        let symbol_alias = self.consume_ident()?;
 

	
 
                        symbols.push(AliasedSymbol{
 
                            position: symbol_position,
 
                            name: symbol_name,
 
                            alias: symbol_alias,
 
                            definition_id: None,
 
                        });
 
                    } else {
 
                        // Symbol does not have an alias
 
                        symbols.push(AliasedSymbol{
 
                            position: symbol_position,
 
                            name: symbol_name.clone(),
 
                            alias: symbol_name,
 
                            definition_id: None,
 
                        });
 
                    }
 

	
 
                    // A comma indicates that we may have another symbol coming
 
                    // up (not necessarily), but if not present then we expect the
 
                    // end of the symbol list
 
                    self.consume_whitespace(false)?;
 

	
 
                    next = self.source.next();
 
                    if let Some(b',') = next {
 
                        self.source.consume();
 
                        self.consume_whitespace(false)?;
 
                        next = self.source.next();
 
                    } else {
 
                        break;
 
                    }
 
                }
 

	
 
                if let Some(b'}') = next {
 
                    // We are fine, push the imported symbols
 
                    self.source.consume();
 
                    if symbols.is_empty() {
 
                        return Err(ParseError2::new_error(self.source, position, "empty symbol import list"));
 
                    }
 

	
 
                    h.alloc_import(|this| Import::Symbols(ImportSymbols{
 
                        this,
 
                        position,
 
                        module_name: value,
 
                        module_id: None,
 
                        symbols,
 
                    }))
 
                } else {
 
                    return Err(self.error_at_pos("Expected '}'"));
 
                }
 
            } else if let Some(b'*') = self.source.next() {
 
                // Import all symbols without alias
 
                self.source.consume();
 
                h.alloc_import(|this| Import::Symbols(ImportSymbols{
 
                    this,
 
                    position,
 
                    module_name: value,
 
                    module_id: None,
 
                    symbols: Vec::new()
 
                }))
 
            } else {
 
                return Err(self.error_at_pos("Expected '*' or '{'"));
 
            }
 
        } else {
 
            // No explicit alias or subimports, so implicit alias
 
            let alias = Vec::from(&value[last_ident_start..]);
 
            h.alloc_import(|this| Import::Module(ImportModule{
 
                this,
 
                position,
 
                module_name: value,
 
                alias,
 
                module_id: None,
 
            }))
 
        };
 

	
 
        self.consume_whitespace(false)?;
 
        self.consume_string(b";")?;
 
        Ok(import)
 
    }
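    // Editor's note: the import forms accepted above (also exercised by the
    // tests further down in this file) are:
    //
    //     import single_module;                    // implicit alias "single_module"
    //     import std.reo;                          // implicit alias "reo" (last identifier)
    //     import something.other as alias;         // explicit module alias
    //     import some_module::*;                   // import all symbols
    //     import some_module::{Foo as Bar, Qux};   // specific symbols, optionally aliased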
 
    pub fn consume_protocol_description(&mut self, h: &mut Heap) -> Result<RootId, ParseError2> {
 
        let position = self.source.pos();
 
        let mut pragmas = Vec::new();
 
        let mut imports = Vec::new();
 
        let mut definitions = Vec::new();
 
        self.consume_whitespace(false)?;
 
        while self.has_pragma() {
 
            let pragma = self.consume_pragma(h)?;
 
            pragmas.push(pragma);
 
            self.consume_whitespace(false)?;
 
        }
 
        while self.has_import() {
 
            let import = self.consume_import(h)?;
 
            imports.push(import);
 
            self.consume_whitespace(false)?;
 
        }
 
        while self.has_symbol_definition() {
 
            let def = self.consume_symbol_definition(h)?;
 
            definitions.push(def);
 
            self.consume_whitespace(false)?;
 
        }
 
        // end of file
 
        if !self.source.is_eof() {
 
            return Err(self.error_at_pos("Expected end of file"));
 
        }
 
        Ok(h.alloc_protocol_description(|this| Root {
 
            this,
 
            position,
 
            pragmas,
 
            imports,
 
            definitions,
 
        }))
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use crate::protocol::ast::*;
 
    use crate::protocol::lexer::*;
 
    use crate::protocol::inputsource::*;
 

	
 
    #[derive(Debug, Eq, PartialEq)]
 
    enum ParserTypeClass {
 
        Message, Bool, Byte, Short, Int, Long, String, Array, Nope
 
    }
 
    impl ParserTypeClass {
 
        fn from(v: &ParserType) -> ParserTypeClass {
 
            use ParserTypeVariant as PTV;
 
            use ParserTypeClass as PTC;
 
            match &v.variant {
 
                PTV::Message => PTC::Message,
 
                PTV::Bool => PTC::Bool,
 
                PTV::Byte => PTC::Byte,
 
                PTV::Short => PTC::Short,
 
                PTV::Int => PTC::Int,
 
                PTV::Long => PTC::Long,
 
                PTV::String => PTC::String,
 
                PTV::Array(_) => PTC::Array,
 
                _ => PTC::Nope,
 
            }
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_pragmas() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        #version 0o7777
 
        #module something.dot.separated
 
        ").expect("new InputSource");
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h)
 
            .expect("lex input source");
 
        let root = &h[lexed];
 
        assert_eq!(root.pragmas.len(), 2);
 
        let pv = &h[root.pragmas[0]];
 
        let pm = &h[root.pragmas[1]];
 

	
 
        if let Pragma::Version(v) = pv {
 
            assert_eq!(v.version, 0o7777)
 
        } else {
 
            assert!(false, "first pragma not version");
 
        }
 
        if let Pragma::Module(m) = pm {
 
            assert_eq!(m.value, b"something.dot.separated");
 
        } else {
 
            assert!(false, "second pragma not module");
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_import() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        // Module imports, with optional and explicit aliasing
 
        import single_module;
 
        import std.reo;
 
        import something.other as alias;
 
        // Symbol imports
 
        import some_module::*;
 
        import some_module::{Foo as Bar, Qux, Dix as Flu};
 
        import std.reo::{
 
            Foo as Bar, // because thing
 
            Qux as Mox, // more explanations
 
            Dix, /* yesh, import me */
 
        };
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h).unwrap();
 
        let root = &h[lexed];
 
        assert_eq!(root.imports.len(), 6);
 
        let no_alias_single = h[root.imports[0]].as_module();
 
        let no_alias_multi = h[root.imports[1]].as_module();
 
        let with_alias = h[root.imports[2]].as_module();
 

	
 
        assert_eq!(no_alias_single.module_name, b"single_module");
 
        assert_eq!(no_alias_single.alias, b"single_module");
 
        assert_eq!(no_alias_multi.module_name, b"std.reo");
 
        assert_eq!(no_alias_multi.alias, b"reo");
 
        assert_eq!(with_alias.module_name, b"something.other");
 
        assert_eq!(with_alias.alias, b"alias");
 

	
 
        let all_symbols = h[root.imports[3]].as_symbols();
 
        let single_line_symbols = h[root.imports[4]].as_symbols();
 
        let multi_line_symbols = h[root.imports[5]].as_symbols();
 

	
 
        assert_eq!(all_symbols.module_name, b"some_module");
 
        assert!(all_symbols.symbols.is_empty());
 
        assert_eq!(single_line_symbols.module_name, b"some_module");
 
        assert_eq!(single_line_symbols.symbols.len(), 3);
 
        assert_eq!(single_line_symbols.symbols[0].name, b"Foo");
 
        assert_eq!(single_line_symbols.symbols[0].alias, b"Bar");
 
        assert_eq!(single_line_symbols.symbols[1].name, b"Qux");
 
        assert_eq!(single_line_symbols.symbols[1].alias, b"Qux");
 
        assert_eq!(single_line_symbols.symbols[2].name, b"Dix");
 
        assert_eq!(single_line_symbols.symbols[2].alias, b"Flu");
 
        assert_eq!(multi_line_symbols.module_name, b"std.reo");
 
        assert_eq!(multi_line_symbols.symbols.len(), 3);
 
        assert_eq!(multi_line_symbols.symbols[0].name, b"Foo");
 
        assert_eq!(multi_line_symbols.symbols[0].alias, b"Bar");
 
        assert_eq!(multi_line_symbols.symbols[1].name, b"Qux");
 
        assert_eq!(multi_line_symbols.symbols[1].alias, b"Mox");
 
        assert_eq!(multi_line_symbols.symbols[2].name, b"Dix");
 
        assert_eq!(multi_line_symbols.symbols[2].alias, b"Dix");
 
    }
 

	
 
    #[test]
 
    fn test_struct_definition() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        struct Foo {
 
            byte one,
 
            short two,
 
            Bar three,
 
        }
 
        struct Bar{int[] one, int[] two, Qux[] three}
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h);
 
        if let Err(err) = &lexed {
 
            println!("{}", err);
 
        }
 
        let lexed = lexed.unwrap();
 
        let root = &h[lexed];
 

	
 
        assert_eq!(root.definitions.len(), 2);
 

	
 
        // let symbolic_type = |v: &PrimitiveType| -> Vec<u8> {
 
        //     if let PrimitiveType::Symbolic(v) = v {
 
        //         v.identifier.value.clone()
 
        //     } else {
 
        //         assert!(false);
 
        //         unreachable!();
 
        //     }
 
        // };
 

	
 
        let foo_def = h[root.definitions[0]].as_struct();
 
        assert_eq!(foo_def.identifier.value, b"Foo");
 
        assert_eq!(foo_def.fields.len(), 3);
 
        assert_eq!(foo_def.fields[0].field.value, b"one");
 
        assert_eq!(ParserTypeClass::from(&h[foo_def.fields[0].parser_type]), ParserTypeClass::Byte);
 
        assert_eq!(foo_def.fields[1].field.value, b"two");
 
        assert_eq!(ParserTypeClass::from(&h[foo_def.fields[1].parser_type]), ParserTypeClass::Short);
 
        assert_eq!(foo_def.fields[2].field.value, b"three");
 
        // assert_eq!(
 
        //     symbolic_type(&h[foo_def.fields[2].the_type].the_type.primitive),
 
        //     Vec::from("Bar".as_bytes())
 
        // );
 

	
 
        let bar_def = h[root.definitions[1]].as_struct();
 
        assert_eq!(bar_def.identifier.value, b"Bar");
 
        assert_eq!(bar_def.fields.len(), 3);
 
        assert_eq!(bar_def.fields[0].field.value, b"one");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[0].parser_type]), ParserTypeClass::Array);
 
        assert_eq!(bar_def.fields[1].field.value, b"two");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[1].parser_type]), ParserTypeClass::Array);
 
        assert_eq!(bar_def.fields[2].field.value, b"three");
 
        assert_eq!(ParserTypeClass::from(&h[bar_def.fields[2].parser_type]), ParserTypeClass::Array);
 
        // assert_eq!(
 
        //     symbolic_type(&h[bar_def.fields[2].parser_type].the_type.primitive),
 
        //     Vec::from("Qux".as_bytes())
 
        // );
 
    }
 

	
 
    #[test]
 
    fn test_enum_definition() {
 
        let mut h = Heap::new();
 
        let mut input = InputSource::from_string("
 
        enum Foo {
 
            A = 0,
 
            B = 5,
 
            C,
 
            D = 0xFF,
 
        }
 
        enum Bar { Ayoo, Byoo, Cyoo,}
 
        enum Qux { A(byte[]), B(Bar[]), C(byte)
 
        }
 
        ").unwrap();
 
        let mut lex = Lexer::new(&mut input);
 
        let lexed = lex.consume_protocol_description(&mut h).unwrap();
 
        let root = &h[lexed];
 

	
 
        assert_eq!(root.definitions.len(), 3);
 

	
 
        let foo_def = h[root.definitions[0]].as_enum();
 
        assert_eq!(foo_def.identifier.value, b"Foo");
 
        assert_eq!(foo_def.variants.len(), 4);
 
        assert_eq!(foo_def.variants[0].identifier.value, b"A");
 
        assert_eq!(foo_def.variants[0].value, EnumVariantValue::Integer(0));
 
        assert_eq!(foo_def.variants[1].identifier.value, b"B");
 
        assert_eq!(foo_def.variants[1].value, EnumVariantValue::Integer(5));
 
        assert_eq!(foo_def.variants[2].identifier.value, b"C");
 
        assert_eq!(foo_def.variants[2].value, EnumVariantValue::None);
 
        assert_eq!(foo_def.variants[3].identifier.value, b"D");
 
        assert_eq!(foo_def.variants[3].value, EnumVariantValue::Integer(0xFF));
 

	
 
        let bar_def = h[root.definitions[1]].as_enum();
 
        assert_eq!(bar_def.identifier.value, b"Bar");
 
        assert_eq!(bar_def.variants.len(), 3);
 
        assert_eq!(bar_def.variants[0].identifier.value, b"Ayoo");
 
        assert_eq!(bar_def.variants[0].value, EnumVariantValue::None);
 
        assert_eq!(bar_def.variants[1].identifier.value, b"Byoo");
 
        assert_eq!(bar_def.variants[1].value, EnumVariantValue::None);
 
        assert_eq!(bar_def.variants[2].identifier.value, b"Cyoo");
 
        assert_eq!(bar_def.variants[2].value, EnumVariantValue::None);
 

	
 
        let qux_def = h[root.definitions[2]].as_enum();
 
        let enum_type = |value: &EnumVariantValue| -> &ParserType {
 
            if let EnumVariantValue::Type(t) = value {
 
                &h[*t]
 
            } else {
 
                assert!(false);
 
                unreachable!();
 
            }
 
        };
 
        assert_eq!(qux_def.identifier.value, b"Qux");
 
        assert_eq!(qux_def.variants.len(), 3);
 
        assert_eq!(qux_def.variants[0].identifier.value, b"A");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[0].value)), ParserTypeClass::Array);
 
        assert_eq!(qux_def.variants[1].identifier.value, b"B");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[1].value)), ParserTypeClass::Array);
 
        // if let PrimitiveType::Symbolic(t) = &enum_type(&qux_def.variants[1].value).the_type.primitive {
 
        //     assert_eq!(t.identifier.value, Vec::from("Bar".as_bytes()));
 
        // } else { assert!(false) }
 

	
 
        assert_eq!(qux_def.variants[2].identifier.value, b"C");
 
        assert_eq!(ParserTypeClass::from(enum_type(&qux_def.variants[2].value)), ParserTypeClass::Byte);
 
    }
 

	
 
//     #[test]
 
//     fn test_lowercase() {
 
//         assert_eq!(lowercase(b'a'), b'a');
 
//         assert_eq!(lowercase(b'A'), b'a');
 
//         assert_eq!(lowercase(b'z'), b'z');
 
//         assert_eq!(lowercase(b'Z'), b'z');
 
//     }
 

	
 
//     #[test]
 
//     fn test_basic_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("a+b;").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:?}", expr);
 
//                 if let Binary(bin) = &h[expr] {
 
//                     if let Variable(left) = &h[bin.left] {
 
//                         if let Variable(right) = &h[bin.right] {
 
//                             assert_eq!("a", format!("{}", h[left.identifier]));
 
//                             assert_eq!("b", format!("{}", h[right.identifier]));
 
//                             assert_eq!(Some(b';'), is.next());
 
//                             return;
 
//                         }
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_paren_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("(true)").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_paren_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:#?}", expr);
 
//                 if let Constant(con) = &h[expr] {
 
//                     if let ast::Constant::True = con.value {
 
//                         return;
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_expression() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("(x(1+5,get(y))-w[5])+z++\n").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_expression(&mut h) {
 
//             Ok(expr) => {
 
//                 println!("{:#?}", expr);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_basic_statement() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string("while (true) { skip; }").unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_statement(&mut h) {
 
//             Ok(stmt) => {
 
//                 println!("{:#?}", stmt);
 
//                 if let Statement::While(w) = &h[stmt] {
 
//                     if let Expression::Constant(_) = h[w.test] {
 
//                         if let Statement::Block(_) = h[w.body] {
 
//                             return;
 
//                         }
 
//                     }
 
//                 }
 
//                 assert!(false);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 

	
 
//     #[test]
 
//     fn test_statement() {
 
//         let mut h = Heap::new();
 
//         let mut is = InputSource::from_string(
 
//             "label: while (true) { if (x++ > y[0]) break label; else continue; }\n",
 
//         )
 
//         .unwrap();
 
//         let mut lex = Lexer::new(&mut is);
 
//         match lex.consume_statement(&mut h) {
 
//             Ok(stmt) => {
 
//                 println!("{:#?}", stmt);
 
//             }
 
//             Err(err) => {
 
//                 err.print(&is);
 
//                 assert!(false);
 
//             }
 
//         }
 
//     }
 
}
src/protocol/parser/mod.rs
Show inline comments
 
@@ -36,359 +36,359 @@ pub struct Parser {
 
}
 

	
 
impl Parser {
 
    pub fn new() -> Self {
 
        Parser{
 
            heap: Heap::new(),
 
            modules: Vec::new(),
 
            module_lookup: HashMap::new()
 
        }
 
    }
 

	
 
    // TODO: @fix, temporary implementation to keep code compilable
 
    pub fn new_with_source(source: InputSource) -> Result<Self, ParseError2> {
 
        let mut parser = Parser::new();
 
        parser.feed(source)?;
 
        Ok(parser)
 
    }
 

	
 
    pub fn feed(&mut self, mut source: InputSource) -> Result<RootId, ParseError2> {
 
        // Lex the input source
 
        let mut lex = Lexer::new(&mut source);
 
        let pd = lex.consume_protocol_description(&mut self.heap)?;
 

	
 
        // Seek the module name and version
 
        let root = &self.heap[pd];
 
        let mut module_name_pos = InputPosition::default();
 
        let mut module_name = Vec::new();
 
        let mut module_version_pos = InputPosition::default();
 
        let mut module_version = None;
 

	
 
        for pragma in &root.pragmas {
 
            match &self.heap[*pragma] {
 
                Pragma::Module(module) => {
 
                    if !module_name.is_empty() {
 
                        return Err(
 
                            ParseError2::new_error(&source, module.position, "Double definition of module name in the same file")
 
                                .with_postfixed_info(&source, module_name_pos, "Previous definition was here")
 
                        )
 
                    }
 

	
 
                    module_name_pos = module.position.clone();
 
                    module_name = module.value.clone();
 
                },
 
                Pragma::Version(version) => {
 
                    if module_version.is_some() {
 
                        return Err(
 
                            ParseError2::new_error(&source, version.position, "Double definition of module version")
 
                                .with_postfixed_info(&source, module_version_pos, "Previous definition was here")
 
                        )
 
                    }
 

	
 
                    module_version_pos = version.position.clone();
 
                    module_version = Some(version.version);
 
                },
 
            }
 
        }
 

	
 
        // Add module to list of modules and prevent naming conflicts
 
        let cur_module_idx = self.modules.len();
 
        if let Some(prev_module_idx) = self.module_lookup.get(&module_name) {
 
            // Find `#module` statement in other module again
 
            let prev_module = &self.modules[*prev_module_idx];
 
            let prev_module_pos = self.heap[prev_module.root_id].pragmas
 
                .iter()
 
                .find_map(|p| {
 
                    match &self.heap[*p] {
 
                        Pragma::Module(module) => Some(module.position.clone()),
 
                        _ => None
 
                    }
 
                })
 
                .unwrap_or(InputPosition::default());
 

	
 
            let module_name_msg = if module_name.is_empty() {
 
                format!("a nameless module")
 
            } else {
 
                format!("module '{}'", String::from_utf8_lossy(&module_name))
 
            };
 

	
 
            return Err(
 
                ParseError2::new_error(&source, module_name_pos, &format!("Double definition of {} across files", module_name_msg))
 
                    .with_postfixed_info(&prev_module.source, prev_module_pos, "Other definition was here")
 
            );
 
        }
 

	
 
        self.modules.push(LexedModule{
 
            source,
 
            module_name: module_name.clone(),
 
            version: module_version,
 
            root_id: pd
 
        });
 
        self.module_lookup.insert(module_name, cur_module_idx);
 
        Ok(pd)
 
    }
 

	
 
    pub fn compile(&mut self) {
 
        // Build module lookup
 
    }
 

	
 
    fn resolve_symbols_and_types(&mut self) -> Result<(SymbolTable, TypeTable), ParseError2> {
 
        // Construct the symbol table to resolve any imports and/or definitions,
 
        // then use the symbol table to actually annotate all of the imports.
 
        // If the symbol table is constructed correctly then all imports MUST be
        // resolvable.
 
        // TODO: Update once namespaced identifiers are implemented
 
        let symbol_table = SymbolTable::new(&self.heap, &self.modules)?;
 

	
 
        // Not pretty, but we need to work around Rust's borrowing rules: it is
        // perfectly safe to mutate the contents of an AST element that we are
        // not borrowing anywhere else.
        // TODO: Maybe directly access heap's members to allow borrowing from
        //  multiple members of Heap? Not pretty though...
 
        let mut module_index = 0;
 
        let mut import_index = 0;
 
        loop {
 
            if module_index >= self.modules.len() {
 
                break;
 
            }
 

	
 
            let module_root_id = self.modules[module_index].root_id;
 
            let import_id = {
 
                let root = &self.heap[module_root_id];
 
                if import_index >= root.imports.len() {
 
                    module_index += 1;
 
                    import_index = 0;
 
                    continue
 
                }
 
                root.imports[import_index]
 
            };
 

	
 
            let import = &mut self.heap[import_id];
 
            match import {
 
                Import::Module(import) => {
 
                    debug_assert!(import.module_id.is_none(), "module import already resolved");
 
                    let target_module_id = symbol_table.resolve_module(&import.module_name)
 
                        .expect("module import is resolved by symbol table");
 
                    import.module_id = Some(target_module_id)
 
                },
 
                Import::Symbols(import) => {
 
                    debug_assert!(import.module_id.is_none(), "module of symbol import already resolved");
 
                    let target_module_id = symbol_table.resolve_module(&import.module_name)
 
                        .expect("symbol import's module is resolved by symbol table");
 
                    import.module_id = Some(target_module_id);
 

	
 
                    for symbol in &mut import.symbols {
 
                        debug_assert!(symbol.definition_id.is_none(), "symbol import already resolved");
 
                        let (_, target_definition_id) = symbol_table.resolve_symbol(module_root_id, &symbol.alias)
 
                            .expect("symbol import is resolved by symbol table")
 
                            .as_definition()
 
                            .expect("symbol import does not resolve to namespace symbol");
 
                        symbol.definition_id = Some(target_definition_id);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // All imports in the AST are now annotated. We now use the symbol table
 
        // to construct the type table.
 
        let mut type_ctx = TypeCtx::new(&symbol_table, &mut self.heap, &self.modules);
 
        let type_table = TypeTable::new(&mut type_ctx)?;
 

	
 
        Ok((symbol_table, type_table))
 
    }
 

	
 
    // TODO: @fix, temporary impl to keep code compilable
 
    pub fn parse(&mut self) -> Result<RootId, ParseError2> {
 
        assert_eq!(self.modules.len(), 1, "Fix meeeee");
 
        let root_id = self.modules[0].root_id;
 

	
 
        let (mut symbol_table, mut type_table) = self.resolve_symbols_and_types()?;
 

	
 
        // TODO: @cleanup
 
        let mut ctx = visitor::Ctx{
 
            heap: &mut self.heap,
 
            module: &self.modules[0],
 
            symbols: &mut symbol_table,
 
            types: &mut type_table,
 
        };
 
        let mut visit = ValidityAndLinkerVisitor::new();
 
        visit.visit_module(&mut ctx)?;
 
        let mut type_visit = TypeResolvingVisitor::new();
 
        let mut queue = ResolveQueue::new();
 
        TypeResolvingVisitor::queue_module_definitions(&ctx, &mut queue);
 
        while !queue.is_empty() {
 
            let top = queue.pop().unwrap();
 
            println!("Resolving root={}, def={}, mono={:?}", top.root_id.index, top.definition_id.index, top.monomorph_types);
 
            type_visit.handle_module_definition(&mut ctx, &mut queue, top)?;
 
        }
 

	
 
        if let Err((position, message)) = Self::parse_inner(&mut self.heap, root_id) {
 
            return Err(ParseError2::new_error(&self.modules[0].source, position, &message))
 
        }
 

	
 
        let mut writer = ASTWriter::new();
 
        let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        writer.write_ast(&mut file, &self.heap);
 
        // let mut writer = ASTWriter::new();
 
        // let mut file = std::fs::File::create(std::path::Path::new("ast.txt")).unwrap();
 
        // writer.write_ast(&mut file, &self.heap);
 

	
 
        Ok(root_id)
 
    }
 

	
 
    pub fn parse_inner(h: &mut Heap, pd: RootId) -> VisitorResult {
 
        // TODO: @cleanup, slowly phasing out old compiler
 
        // NestedSynchronousStatements::new().visit_protocol_description(h, pd)?;
 
        // ChannelStatementOccurrences::new().visit_protocol_description(h, pd)?;
 
        // FunctionStatementReturns::new().visit_protocol_description(h, pd)?;
 
        // ComponentStatementReturnNew::new().visit_protocol_description(h, pd)?;
 
        // CheckBuiltinOccurrences::new().visit_protocol_description(h, pd)?;
 
        // BuildSymbolDeclarations::new().visit_protocol_description(h, pd)?;
 
        // LinkCallExpressions::new().visit_protocol_description(h, pd)?;
 
        // BuildScope::new().visit_protocol_description(h, pd)?;
 
        // ResolveVariables::new().visit_protocol_description(h, pd)?;
 
        LinkStatements::new().visit_protocol_description(h, pd)?;
 
        // BuildLabels::new().visit_protocol_description(h, pd)?;
 
        // ResolveLabels::new().visit_protocol_description(h, pd)?;
 
        AssignableExpressions::new().visit_protocol_description(h, pd)?;
 
        IndexableExpressions::new().visit_protocol_description(h, pd)?;
 
        SelectableExpressions::new().visit_protocol_description(h, pd)?;
 

	
 
        Ok(())
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use std::fs::File;
 
    use std::io::Read;
 
    use std::path::Path;
 

	
 
    use super::*;
 

	
 
    // #[test]
 
    fn positive_tests() {
 
        for resource in TestFileIter::new("testdata/parser/positive", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            // println!(" * running: {}", &resource);
 
            let path = Path::new(&resource);
 
            let source = InputSource::from_file(&path).unwrap();
 
            // println!("DEBUG -- input:\n{}", String::from_utf8_lossy(&source.input));
 
            let mut parser = Parser::new_with_source(source).expect("parse source");
 
            match parser.parse() {
 
                Ok(_) => {}
 
                Err(err) => {
 
                    println!(" > file: {}", &resource);
 
                    println!("{}", err);
 
                    assert!(false);
 
                }
 
            }
 
        }
 
    }
 

	
 
    // #[test]
 
    fn negative_tests() {
 
        for resource in TestFileIter::new("testdata/parser/negative", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            let path = Path::new(&resource);
 
            let expect = path.with_extension("txt");
 
            let mut source = InputSource::from_file(&path).unwrap();
 
            let mut parser = Parser::new_with_source(source).expect("construct parser");
 
            match parser.parse() {
 
                Ok(pd) => {
 
                    println!("Expected parse error:");
 

	
 
                    let mut cev: Vec<u8> = Vec::new();
 
                    let mut f = File::open(expect).unwrap();
 
                    f.read_to_end(&mut cev).unwrap();
 
                    println!("{}", String::from_utf8_lossy(&cev));
 
                    assert!(false);
 
                }
 
                Err(err) => {
 
                    let expected = format!("{}", err);
 
                    println!("{}", &expected);
 

	
 
                    let mut cev: Vec<u8> = Vec::new();
 
                    let mut f = File::open(expect).unwrap();
 
                    f.read_to_end(&mut cev).unwrap();
 
                    println!("{}", String::from_utf8_lossy(&cev));
 

	
 
                    assert_eq!(expected.as_bytes(), cev);
 
                }
 
            }
 
        }
 
    }
 

	
 
    // #[test]
 
    fn counterexample_tests() {
 
        for resource in TestFileIter::new("testdata/parser/counterexamples", "pdl") {
 
            let resource = resource.expect("read testdata filepath");
 
            let path = Path::new(&resource);
 
            let source = InputSource::from_file(&path).unwrap();
 
            let mut parser = Parser::new_with_source(source).expect("construct parser");
 

	
 
            fn print_header(s: &str) {
 
                println!("{}", "=".repeat(80));
 
                println!(" > File: {}", s);
 
                println!("{}", "=".repeat(80));
 
            }
 

	
 
            match parser.parse() {
 
                Ok(parsed) => {
 
                    print_header(&resource);
 
                    println!("\n  SUCCESS\n\n --- source:\n{}", String::from_utf8_lossy(&parser.modules[0].source.input));
 
                },
 
                Err(err) => {
 
                    print_header(&resource);
 
                    println!(
 
                        "\n  FAILURE\n\n --- error:\n{}\n --- source:\n{}",
 
                        err,
 
                        String::from_utf8_lossy(&parser.modules[0].source.input)
 
                    )
 
                }
 
            }
 
        }
 
    }
 

	
 
    struct TestFileIter {
 
        iter: std::fs::ReadDir,
 
        root: String,
 
        extension: String
 
    }
 

	
 
    impl TestFileIter {
 
        fn new(root_dir: &str, extension: &str) -> Self {
 
            let path = Path::new(root_dir);
 
            assert!(path.is_dir(), "root '{}' is not a directory", root_dir);
 

	
 
            let iter = std::fs::read_dir(path).expect("list dir contents");
 

	
 
            Self {
 
                iter,
 
                root: root_dir.to_string(),
 
                extension: extension.to_string(),
 
            }
 
        }
 
    }
 

	
 
    impl Iterator for TestFileIter {
 
        type Item = Result<String, String>;
 

	
 
        fn next(&mut self) -> Option<Self::Item> {
 
            while let Some(entry) = self.iter.next() {
 
                if let Err(e) = entry {
 
                    return Some(Err(format!("failed to read dir entry, because: {}", e)));
 
                }
 
                let entry = entry.unwrap();
 

	
 
                let path = entry.path();
 
                if !path.is_file() { continue; }
 

	
 
                let extension = path.extension();
 
                if extension.is_none() { continue; }
 
                let extension = extension.unwrap().to_string_lossy();
 
                if extension != self.extension { continue; }
 

	
 
                return Some(Ok(path.to_string_lossy().to_string()));
 
            }
 

	
 
            None
 
        }
 
    }
 
}
src/protocol/parser/type_resolver.rs
Show inline comments
 
/// type_resolver.rs
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
/// procedure and determine its initial type based on context (e.g. a return
/// statement's expression must match the function's return type, an if
/// statement's test expression must evaluate to a boolean). When all are
/// visited we attempt to make progress in evaluating the types. Whenever a type
/// is progressed we queue the related expressions for further type progression.
/// Once no more expressions are in the queue the algorithm is finished. At this
/// point either all types are inferred (or can be trivially implicitly
/// determined), or we have incomplete types. In the latter case we return an
/// error.
 
///
 
/// Inference may be applied on non-polymorphic procedures and on polymorphic
 
/// procedures. When dealing with a non-polymorphic procedure we apply the type
 
/// resolver and annotate the AST with the `ConcreteType`s. When dealing with
 
/// polymorphic procedures we will only annotate the AST once, preserving
 
/// references to polymorphic variables. Any later pass will perform just the
 
/// type checking.
 
///
 
/// TODO: Needs an optimization pass
 
/// TODO: Needs a cleanup pass
 
/// TODO: Disallow `Void` types in various expressions (and other future types)
 
/// TODO: Maybe remove msg type?
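///
/// As a rough illustration of a single inference step (hypothetical example):
/// an expression whose type is so far only known to be `IntegerLike` can be
/// forced to `Byte` once it is matched against a parameter or template that is
/// concretely a `byte`; that progression then queues the expressions linked to
/// it for another inference pass.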
 

	
 
macro_rules! enabled_debug_print {
 
    (false, $name:literal, $format:literal) => {};
 
    (false, $name:literal, $format:literal, $($args:expr),*) => {};
 
    (true, $name:literal, $format:literal) => {
 
        println!("[{}] {}", $name, $format)
 
    };
 
    (true, $name:literal, $format:literal, $($args:expr),*) => {
 
        println!("[{}] {}", $name, format!($format, $($args),*))
 
    };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "types", $format, $($args),*);
 
    };
 
}
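
// Example use (with a hypothetical `count` variable):
//   debug_log!("resolved {} expressions", count);
// prints `[types] resolved ...`; the hard-coded `true` in the expansion above
// enables the output, switching it to `false` silences this module's logging.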
 

	
 
use std::collections::{HashMap, HashSet, VecDeque};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::inputsource::*;
 
use super::type_table::*;
 
use super::symbol_table::*;
 
use super::visitor::{
 
    STMT_BUFFER_INIT_CAPACITY,
 
    EXPR_BUFFER_INIT_CAPACITY,
 
    Ctx,
 
    Visitor2,
 
    VisitorResult
 
};
 
use std::collections::hash_map::Entry;
 
use crate::protocol::parser::type_resolver::InferenceTypePart::IntegerLike;
 

	
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Message ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::Byte ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 
const PORTLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::PortLike, InferenceTypePart::Unknown ];
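
// These `*_TEMPLATE` constants are plain part slices that are presumably
// matched against expression types during inference (e.g. forcing a test
// expression towards `BOOL_TEMPLATE`), after which any remaining
// `Unknown`/`*Like` parts are filled in as inference progresses.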
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // A marker with an identifier which we can use to seek subsections of the 
 
    // inferred type
 
    Marker(usize),
 
    // A marker with an identifier which we can use to retrieve the type subtree
 
    // that follows the marker. This is used to perform type inference on
 
    // polymorphs: an expression may determine the polymorphs type, after we
 
    // need to apply that information to all other places where the polymorph is
 
    // used.
 
    MarkerDefinition(usize), // marker for polymorph types on a procedure's definition
 
    MarkerBody(usize), // marker for polymorph types within a procedure body
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Message,
 
    Bool,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, usize)
 
}
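
// For illustration (hypothetical values): an inference type is stored as a
// depth-first pre-order serialization of its parts. `byte[]` is encoded as
// `[Array, Byte]`, `in<byte>` as `[Input, Byte]`, and an array whose element
// type is not yet inferred as `[Array, Unknown]`.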
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        if let InferenceTypePart::Marker(_) = self { true } else { false }
 
        use InferenceTypePart as ITP;
 

	
 
        match self {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | 
 
            ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        // TODO: @float
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_array_or_slice(&self) -> bool {
 
    fn is_concrete_msg_array_or_slice(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice => true,
 
            ITP::Array | ITP::Slice | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
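    /// For example: `IntegerLike` may be inferred from `Byte`, and `PortLike`
    /// from `Input`, but a concrete part such as `Bool` is never inferred from
    /// anything here.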
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_array_or_slice()) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_msg_array_or_slice()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
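    /// (e.g. `Byte` has no subtypes and yields -1, `Array` has one subtype and
    /// yields 0, and an `Instance` with two type arguments yields +1).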
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Message | ITP::Bool | 
 
            ITP::Void | ITP::Bool |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long | 
 
            ITP::String => {
 
                -1
 
            },
 
            ITP::Marker(_) | ITP::ArrayLike | ITP::Array | ITP::Slice | 
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Instance(_, num_args) => {
 
                (*num_args as i32) - 1
 
            }
 
        }
 
    }
 
}
 

	
 
impl From<ConcreteTypePart> for InferenceTypePart {
 
    fn from(v: ConcreteTypePart) -> InferenceTypePart {
 
        use ConcreteTypePart as CTP;
 
        use InferenceTypePart as ITP;
 

	
 
        match v {
 
            CTP::Marker(_) => {
 
                unreachable!("encountered marker while converting concrete type to inferred type");
 
            }
 
            CTP::Void => ITP::Void,
 
            CTP::Message => ITP::Message,
 
            CTP::Bool => ITP::Bool,
 
            CTP::Byte => ITP::Byte,
 
            CTP::Short => ITP::Short,
 
            CTP::Int => ITP::Int,
 
            CTP::Long => ITP::Long,
 
            CTP::String => ITP::String,
 
            CTP::Array => ITP::Array,
 
            CTP::Slice => ITP::Slice,
 
            CTP::Input => ITP::Input,
 
            CTP::Output => ITP::Output,
 
            CTP::Instance(id, num) => ITP::Instance(id, num),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug)]
 
struct InferenceType {
 
    has_marker: bool,
 
    has_body_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    fn new(has_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
    fn new(has_body_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        if cfg!(debug_assertions) {
 
            debug_assert!(!parts.is_empty());
 
            if !has_marker {
 
                debug_assert!(parts.iter().all(|v| !v.is_marker()));
 
            if !has_body_marker {
 
                debug_assert!(parts.iter().all(|v| {
 
                    if let InferenceTypePart::MarkerBody(_) = v { false } else { true }
 
                }));
 
            }
 
            if is_done {
 
                debug_assert!(parts.iter().all(|v| v.is_concrete()));
 
            }
 
        }
 
        Self{ has_marker, is_done, parts }
 
        Self{ has_body_marker, is_done, parts }
 
    }
 

	
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
        let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
        self.parts.splice(start_idx..end_idx, with.iter().cloned());
 
        self.recompute_is_done();
 
    }
 

	
 
    // TODO: @performance, might all be done inline in the type inference methods
 
    fn recompute_is_done(&mut self) {
 
        self.is_done = self.parts.iter().all(|v| v.is_concrete());
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, a number
 
    // TODO: @float
 
    fn might_be_number(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long =>
 
                true,
 
            _ =>
 
                false,
 
        }
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, an integer
 
    fn might_be_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::IntegerLike |
 
            ITP::Byte | ITP::Short | ITP::Int | ITP::Long =>
 
                true,
 
            _ =>
 
                false,
 
        }
 
    }
 

	
 
    /// Checks if type is, or may be inferred as, a boolean
 
    fn might_be_boolean(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        // TODO: @marker?
 
        if self.parts.len() != 1 { return false; }
 
        match self.parts[0] {
 
            ITP::Unknown | ITP::Bool => true,
 
            _ => false
 
        }
 
    }
 

	
 
    /// Returns an iterator over all markers and the partial type tree that
 
    /// Returns an iterator over all body markers and the partial type tree that
 
    /// follows those markers.
 
    fn marker_iter(&self) -> InferenceTypeMarkerIter {
 
    fn body_marker_iter(&self) -> InferenceTypeMarkerIter {
 
        InferenceTypeMarkerIter::new(&self.parts)
 
    }
 

	
 
    /// Attempts to find a marker with a specific value appearing at or after
 
    /// the specified index. If found then the partial type tree's bounding
 
    /// indices that follow that marker are returned.
 
    fn find_subtree_idx_for_marker(&self, marker: usize, mut idx: usize) -> Option<(usize, usize)> {
 
        // Seek ahead to find a marker
 
        let marker = InferenceTypePart::Marker(marker);
 
    /// Attempts to find a specific type part appearing at or after the
 
    /// specified index. If found then the partial type tree's bounding indices
 
    /// that follow that marker are returned.
 
    fn find_subtree_idx_for_part(&self, part: InferenceTypePart, mut idx: usize) -> Option<(usize, usize)> {
 
        debug_assert!(part.depth_change() >= 0, "cannot find subtree for leaf part");
 
        while idx < self.parts.len() {
 
            if marker == self.parts[idx] {
 
                // Found the marker
 
            if part == self.parts[idx] {
 
                // Found the specified part
 
                let start_idx = idx + 1;
 
                let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
                return Some((start_idx, end_idx))
 
            }
 

	
 
            idx += 1;
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Given that the `parts` are a depth-first serialized tree of types, this
 
    /// function finds the subtree anchored at a specific node. The returned 
 
    /// index is exclusive.
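    /// For example, given the (hypothetical) parts `[Instance(id, 2), Byte,
    /// Array, Bool]` and `start_idx = 0`, the returned exclusive end index is
    /// 4: the instance opens two subtrees, `Byte` closes the first one and
    /// `Array` followed by `Bool` forms the second one.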
 
    fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize {
 
        let mut depth = 1;
 
        let mut idx = start_idx;
 

	
 
        while idx < parts.len() {
 
            depth += parts[idx].depth_change();
 
            if depth == 0 {
 
                return idx + 1;
 
            }
 
            idx += 1;
 
        }
 

	
 
        // If here, then the inference type is malformed
 
        unreachable!();
 
    }
 

	
 
    /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` 
 
    /// using the subtree at `template.parts[template_idx]`. Will return 
 
    /// `Some(depth_change_due_to_traversal)` if type inference has been 
 
    /// applied. In this case the indices will also be modified to point to the 
 
    /// next part in both templates. If type inference has not (or: could not) 
 
    /// be applied then `None` will be returned. Note that this might mean that 
 
    /// the types are incompatible.
 
    ///
 
    /// As this is a helper function, some assumptions hold: the parts are not
    /// exactly equal, and neither of them contains a marker. Also: only the
    /// `to_infer` parts are checked for inference. It might be that this
    /// function returns `None`, but that `template` is still compatible
    /// with `to_infer`, e.g. when `template` has an `Unknown` part.
 
    fn infer_part_for_single_type(
 
        to_infer: &mut InferenceType, to_infer_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize,
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_infer_part = &to_infer.parts[*to_infer_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // TODO: Maybe do this differently?
 
        let mut template_definition_marker = None;
 
        if *template_idx > 0 {
 
            if let ITP::MarkerDefinition(marker) = &template_parts[*template_idx - 1] {
 
                template_definition_marker = Some(*marker)
 
            }
 
        }
 

	
 
        // Check for programmer mistakes
 
        debug_assert_ne!(to_infer_part, template_part);
 
        debug_assert!(!to_infer_part.is_marker(), "marker encountered in 'infer part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        // Inference of a somewhat-specified type
 
        if to_infer_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_infer_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 

	
 
            if let Some(marker) = template_definition_marker {
 
                to_infer.parts.insert(*to_infer_idx, ITP::MarkerDefinition(marker));
 
                *to_infer_idx += 1;
 
            }
 

	
 
            to_infer.parts[*to_infer_idx] = template_part.clone();
 

	
 
            *to_infer_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        // Inference of a completely unknown type
 
        if *to_infer_part == ITP::Unknown {
 
            // template part is different, so cannot be unknown, hence copy the
 
            // entire subtree
 
            let template_end_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 
            to_infer.parts[*to_infer_idx] = template_part.clone();
 
            *to_infer_idx += 1;
 
            for insert_idx in (*template_idx + 1)..template_end_idx {
 
                to_infer.parts.insert(*to_infer_idx, template_parts[insert_idx].clone());
 
            let erase_offset = if let Some(marker) = template_definition_marker {
 
                to_infer.parts[*to_infer_idx] = ITP::MarkerDefinition(marker);
 
                *to_infer_idx += 1;
 
            }
 
                0
 
            } else {
 
                1
 
            };
 

	
 
            to_infer.parts.splice(
 
                *to_infer_idx..*to_infer_idx + erase_offset,
 
                template_parts[*template_idx..template_end_idx].iter().cloned()
 
            );
 
            *to_infer_idx += (template_end_idx - *template_idx);
 
            *template_idx = template_end_idx;
 

	
 
            // Note: by definition the LHS was Unknown and the RHS traversed a 
 
            // full subtree.
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Call that checks if the `to_check` part is compatible with the `infer`
 
    /// part. This essentially implements `infer_part_for_single_type` but skips
 
    /// over the matching parts.
 
    /// part. This is essentially a copy of `infer_part_for_single_type`, but
 
    /// without actually copying the type parts.
 
    fn check_part_for_single_type(
 
        to_check_parts: &[InferenceTypePart], to_check_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_check_part = &to_check_parts[*to_check_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Checking programmer errors
 
        debug_assert_ne!(to_check_part, template_part);
 
        debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        if to_check_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_check_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 
            *to_check_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        if *to_check_part == ITP::Unknown {
 
            *to_check_idx += 1;
 
            *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 

	
 
            // By definition LHS and RHS had depth change of -1
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Attempts to infer types between two `InferenceType` instances. This 
 
    /// function is unsafe as it accepts pointers to work around Rust's 
 
    /// borrowing rules. The caller must ensure that the pointers are distinct.
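    /// For example (hypothetical parts): calling this with
    /// `type_a = [Array, Byte]` (i.e. `byte[]`) and `type_b = [Array, Unknown]`
    /// (i.e. `?[]`) fills in the `Unknown` of the second type and returns
    /// `DualInferenceResult::Second`.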
 
    unsafe fn infer_subtrees_for_both_types(
 
        type_a: *mut InferenceType, start_idx_a: usize,
 
        type_b: *mut InferenceType, start_idx_b: usize
 
    ) -> DualInferenceResult {
 
        use InferenceTypePart as ITP;
 

	
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "same inference types");
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "encountered pointers to the same inference type");
 
        let type_a = &mut *type_a;
 
        let type_b = &mut *type_b;
 

	
 
        let mut modified_a = false;
 
        let mut modified_b = false;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            // Advance indices if we encounter markers or equal parts
 
            let part_a = &type_a.parts[idx_a];
 
            let part_b = &type_b.parts[idx_b];
 
            
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            if let ITP::Marker(_) = part_a { idx_a += 1; continue; }
 
            if let ITP::Marker(_) = part_b { idx_b += 1; continue; }
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            // Types are not equal and are both not markers
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_a, &mut idx_a, &type_b.parts, &mut idx_b) {
 
                depth += depth_change;
 
                modified_a = true;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_b, &mut idx_b, &type_a.parts, &mut idx_a) {
 
                depth += depth_change;
 
                modified_b = true;
 
                continue;
 
            }
 

	
 
            // They cannot be inferred in any way either: the types must be incompatible
 
            return DualInferenceResult::Incompatible;
 
        }
 

	
 
        if modified_a { type_a.recompute_is_done(); }
 
        if modified_b { type_b.recompute_is_done(); }
 

	
 
        // If here then we completely inferred the subtrees.
 
        match (modified_a, modified_b) {
 
            (false, false) => DualInferenceResult::Neither,
 
            (false, true) => DualInferenceResult::Second,
 
            (true, false) => DualInferenceResult::First,
 
            (true, true) => DualInferenceResult::Both
 
        }
 
    }
 

	
 
    /// Attempts to infer the first subtree based on the template. Like
 
    /// `infer_subtrees_for_both_types`, but now only applying inference to
 
    /// `to_infer` based on the type information in `template`.
 
    /// Secondary use is to make sure that a type follows a certain template.
 
    fn infer_subtree_for_single_type(
 
        to_infer: &mut InferenceType, mut to_infer_idx: usize,
 
        template: &[InferenceTypePart], mut template_idx: usize,
 
    ) -> SingleInferenceResult {
 
        let mut modified = false;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            let to_infer_part = &to_infer.parts[to_infer_idx];
 
            let template_part = &template[template_idx];
 

	
 
            if to_infer_part == template_part {
 
                let depth_change = to_infer_part.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, template_part.depth_change());
 
                to_infer_idx += 1;
 
                template_idx += 1;
 
                continue;
 
            }
 
            if to_infer_part.is_marker() { to_infer_idx += 1; continue; }
 
            if template_part.is_marker() { template_idx += 1; continue; }
 

	
 
            // Types are not equal and not markers. So check if we can infer 
 
            // anything
 
            if let Some(depth_change) = Self::infer_part_for_single_type(
 
                to_infer, &mut to_infer_idx, template, &mut template_idx
 
            ) {
 
                depth += depth_change;
 
                modified = true;
 
                continue;
 
            }
 

	
 
            // We cannot infer anything, but the template may still be 
 
            // compatible with the type we're inferring
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                template, &mut template_idx, &to_infer.parts, &mut to_infer_idx
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return SingleInferenceResult::Incompatible
 
        }
 

	
 
        return if modified {
 
            to_infer.recompute_is_done();
 
            SingleInferenceResult::Modified
 
        } else {
 
            SingleInferenceResult::Unmodified
 
        }
 
    }
 

	
 
    /// Checks if both types are compatible, doesn't perform any inference
 
    fn check_subtrees(
 
        type_parts_a: &[InferenceTypePart], start_idx_a: usize,
 
        type_parts_b: &[InferenceTypePart], start_idx_b: usize
 
    ) -> bool {
 
        let mut depth = 1;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 

	
 
        while depth > 0 {
 
            let part_a = &type_parts_a[idx_a];
 
            let part_b = &type_parts_b[idx_b];
 

	
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_a, &mut idx_a, type_parts_b, &mut idx_b
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_b, &mut idx_b, type_parts_a, &mut idx_a
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return false;
 
        }
 

	
 
        true
 
    }
 

	
 
    /// Performs the conversion of the inference type into a concrete type.
 
    /// Before calling this function the caller must make sure that no
    /// unspecified types (e.g. `Unknown` or `IntegerLike`) remain in the type.
 
    fn write_concrete_type(&self, concrete_type: &mut ConcreteType) {
 
        use InferenceTypePart as ITP;
 
        use ConcreteTypePart as CTP;
 

	
 
        // Make sure inference type is specified but concrete type is not yet specified
 
        debug_assert!(!self.parts.is_empty());
 
        debug_assert!(concrete_type.parts.is_empty());
 
        concrete_type.parts.reserve(self.parts.len());
 

	
 
        for part in &self.parts {
 
        let mut idx = 0;
 
        while idx < self.parts.len() {
 
            let part = &self.parts[idx];
 
            let converted_part = match part {
 
                ITP::Marker(_) => { continue; },
 
                ITP::MarkerDefinition(marker) => {
 
                    // Outer markers are converted to regular markers; the
                    // type subtree that follows them is removed entirely.
 
                    idx = InferenceType::find_subtree_end_idx(&self.parts, idx + 1);
 
                    concrete_type.parts.push(CTP::Marker(*marker));
 
                    continue;
 
                },
 
                ITP::MarkerBody(_) => {
 
                    // Inner markers are removed when writing to the concrete
 
                    // type.
 
                    idx += 1;
 
                    continue;
 
                },
 
                ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => {
 
                    debug_assert!(false, "Attempted to convert inference type part {:?} into concrete type", part);
 
                    unreachable!();
 
                    unreachable!("Attempted to convert inference type part {:?} into concrete type", part);
 
                },
 
                ITP::Void => CTP::Void,
 
                ITP::Message => CTP::Message,
 
                ITP::Bool => CTP::Bool,
 
                ITP::Byte => CTP::Byte,
 
                ITP::Short => CTP::Short,
 
                ITP::Int => CTP::Int,
 
                ITP::Long => CTP::Long,
 
                ITP::String => CTP::String,
 
                ITP::Array => CTP::Array,
 
                ITP::Slice => CTP::Slice,
 
                ITP::Input => CTP::Input,
 
                ITP::Output => CTP::Output,
 
                ITP::Instance(id, num) => CTP::Instance(*id, *num),
 
            };
 

	
 
            concrete_type.parts.push(converted_part);
 
            idx += 1;
 
        }
 
    }
 

	
 
    /// Writes a human-readable version of the type to a string. Mostly a
 
    /// function for interior use.
 
    fn write_display_name(
 
        buffer: &mut String, heap: &Heap, parts: &[InferenceTypePart], mut idx: usize
 
    ) -> usize {
 
        use InferenceTypePart as ITP;
 

	
 
        match &parts[idx] {
 
            ITP::Marker(_) => {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1)
 
            },
 
            ITP::Unknown => buffer.push_str("?"),
 
            ITP::NumberLike => buffer.push_str("num?"),
 
            ITP::IntegerLike => buffer.push_str("int?"),
 
            ITP::ArrayLike => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[?]");
 
            },
 
            ITP::PortLike => {
 
                buffer.push_str("port?<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            }
 
            ITP::Void => buffer.push_str("void"),
 
            ITP::Message => buffer.push_str("msg"),
 
            ITP::Bool => buffer.push_str("bool"),
 
            ITP::Byte => buffer.push_str("byte"),
 
            ITP::Short => buffer.push_str("short"),
 
            ITP::Int => buffer.push_str("int"),
 
            ITP::Long => buffer.push_str("long"),
 
            ITP::String => buffer.push_str("str"),
 
            ITP::Message => {
 
                buffer.push_str("msg<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Array => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            ITP::Slice => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            ITP::Input => {
 
                buffer.push_str("in<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Output => {
 
                buffer.push_str("out<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Instance(definition_id, num_sub) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(&String::from_utf8_lossy(&definition.identifier().value));
 
                if *num_sub > 0 {
 
                    buffer.push('<');
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            },
 
        }
 

	
 
        idx
 
    }
 

	
 
    /// Returns the display name of a (part of) the type tree. Will allocate a
 
    /// string.
 
    fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String {
 
        let mut buffer = String::with_capacity(parts.len() * 6);
 
        Self::write_display_name(&mut buffer, heap, parts, 0);
 
        buffer
 
    }
 

	
 
    /// Returns the display name of the full type tree. Will allocate a string.
 
    fn display_name(&self, heap: &Heap) -> String {
 
        Self::partial_display_name(heap, &self.parts)
 
    }
 
}
 

	
 
/// Iterator over the subtrees that follow a marker in an `InferenceType`
 
/// instance. Returns immutable slices over the internal parts
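/// (e.g. for the hypothetical parts `[Array, MarkerBody(0), Byte]` the
/// iterator yields a single item: `(0, &[Byte])`).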
 
struct InferenceTypeMarkerIter<'a> {
 
    parts: &'a [InferenceTypePart],
 
    idx: usize,
 
}
 

	
 
impl<'a> InferenceTypeMarkerIter<'a> {
 
    fn new(parts: &'a [InferenceTypePart]) -> Self {
 
        Self{ parts, idx: 0 }
 
    }
 
}
 

	
 
impl<'a> Iterator for InferenceTypeMarkerIter<'a> {
 
    type Item = (usize, &'a [InferenceTypePart]);
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Iterate until we find a marker
 
        while self.idx < self.parts.len() {
 
            if let InferenceTypePart::Marker(marker) = self.parts[self.idx] {
 
            if let InferenceTypePart::MarkerBody(marker) = self.parts[self.idx] {
 
                // Found a marker, find the subtree end
 
                let start_idx = self.idx + 1;
 
                let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx);
 

	
 
                // Modify internal index, then return items
 
                self.idx = end_idx;
 
                return Some((marker, &self.parts[start_idx..end_idx]))
 
            }
 

	
 
            self.idx += 1;
 
        }
 

	
 
        None
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum DualInferenceResult {
 
    Neither,        // neither argument is clarified
 
    First,          // first argument is clarified using the second one
 
    Second,         // second argument is clarified using the first one
 
    Both,           // both arguments are clarified
 
    Incompatible,   // types are incompatible: programmer error
 
}
 

	
 
impl DualInferenceResult {
 
    fn modified_any(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_lhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_rhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum SingleInferenceResult {
 
    Unmodified,
 
    Modified,
 
    Incompatible
 
}
 

	
 
enum DefinitionType{
 
    None,
 
    Component(ComponentId),
 
    Function(FunctionId),
 
}
 

	
 
pub(crate) struct ResolveQueueElement {
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) monomorph_types: Vec<ConcreteType>,
 
}
 

	
 
pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
 
pub(crate) struct TypeResolvingVisitor {
 
    // Current definition we're typechecking.
 
    definition_type: DefinitionType,
 
    poly_vars: Vec<ConcreteType>,
 

	
 
    // Buffers for iteration over substatements and subexpressions
 
    stmt_buffer: Vec<StatementId>,
 
    expr_buffer: Vec<ExpressionId>,
 

	
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    var_types: HashMap<VariableId, VarData>,      // types of variables
 
    expr_types: HashMap<ExpressionId, InferenceType>,   // types of expressions
 
    extra_data: HashMap<ExpressionId, ExtraData>,       // data for function call inference
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progression on an associated type
 
    expr_queued: HashSet<ExpressionId>,
 
}
 

	
 
// TODO: @rename used for calls and struct literals, maybe union literals?
 
struct ExtraData {
 
    /// Progression of polymorphic variables (if any)
 
    poly_vars: Vec<InferenceType>,
 
    /// Progression of types of call arguments or struct members
 
    embedded: Vec<InferenceType>,
 
    returned: InferenceType,
 
}
 

	
 
struct VarData {
 
    /// Type of the variable
 
    var_type: InferenceType,
 
    /// VariableExpressions that use the variable
 
    used_at: Vec<ExpressionId>,
 
    /// For channel statements we link to the other variable such that when one
 
    /// channel's interior type is resolved, we can also resolve the other one.
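    /// (For example, once one end of a channel is inferred to be an `in<byte>`,
    /// the other end can be progressed to the matching `out<byte>`.)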
 
    linked_var: Option<VariableId>,
 
}
 

	
 
impl VarData {
 
    fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
 
    }
 
    fn new_local(var_type: InferenceType) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: None }
 
    }
 
}
 

	
 
impl TypeResolvingVisitor {
 
    pub(crate) fn new() -> Self {
 
        TypeResolvingVisitor{
 
            definition_type: DefinitionType::None,
 
            poly_vars: Vec::new(),
 
            stmt_buffer: Vec::with_capacity(STMT_BUFFER_INIT_CAPACITY),
 
            expr_buffer: Vec::with_capacity(EXPR_BUFFER_INIT_CAPACITY),
 
            var_types: HashMap::new(),
 
            expr_types: HashMap::new(),
 
            extra_data: HashMap::new(),
 
            expr_queued: HashSet::new(),
 
        }
 
    }
 

	
 
    // TODO: @cleanup Unsure about this, maybe a pattern will arise after
 
    //  a while.
 
    pub(crate) fn queue_module_definitions(ctx: &Ctx, queue: &mut ResolveQueue) {
 
        let root_id = ctx.module.root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 
            match definition {
 
                Definition::Function(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Component(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Enum(_) | Definition::Struct(_) => {},
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        // Visit the definition
 
        debug_assert_eq!(ctx.module.root_id, element.root_id);
 
        self.reset();
 
        self.poly_vars.clear();
 
        self.poly_vars.extend(element.monomorph_types.iter().cloned());
 
        self.visit_definition(ctx, element.definition_id)?;
 

	
 
        // Keep resolving types
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.definition_type = DefinitionType::None;
 
        self.poly_vars.clear();
 
        self.stmt_buffer.clear();
 
        self.expr_buffer.clear();
 
        self.var_types.clear();
 
        self.expr_types.clear();
 
        self.extra_data.clear();
 
        self.expr_queued.clear();
 
    }
 
}
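
// A minimal sketch of how these entry points would be driven by a caller
// (hypothetical driver loop, not part of this file):
//
//     let mut queue = ResolveQueue::new();
//     TypeResolvingVisitor::queue_module_definitions(&ctx, &mut queue);
//     let mut visitor = TypeResolvingVisitor::new();
//     while let Some(element) = queue.pop() {
//         // May push additional (polymorphic) monomorphs onto the queue.
//         visitor.handle_module_definition(&mut ctx, &mut queue, element)?;
//     }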
 

	
 
impl Visitor2 for TypeResolvingVisitor {
 
    // Definitions
 

	
 
    fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Component(id);
 

	
 
        let comp_def = &ctx.heap[id];
 
        debug_assert_eq!(comp_def.poly_vars.len(), self.poly_vars.len(), "component polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting component '{}': {}", &String::from_utf8_lossy(&comp_def.identifier.value), id.0.index);
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in comp_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type(ctx, param.parser_type, true);
 
            debug_assert!(var_type.is_done, "expected component arguments to be concrete types");
 
            self.var_types.insert(param_id.upcast(), VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Function(id);
 

	
 
        let func_def = &ctx.heap[id];
 
        debug_assert_eq!(func_def.poly_vars.len(), self.poly_vars.len(), "function polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting function '{}': {}", &String::from_utf8_lossy(&func_def.identifier.value), id.0.index);
 
        debug_log!("{}", "-".repeat(80));
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in func_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type(ctx, param.parser_type, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_types.insert(param_id.upcast(), VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Transfer statements for traversal
 
        let block = &ctx.heap[id];
 

	
 
        for stmt_id in block.statements.clone() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 

	
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type(ctx, local.parser_type, true);
 
        self.var_types.insert(memory_stmt.variable.upcast(), VarData::new_local(var_type));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type(ctx, from_local.parser_type, true);
 
        self.var_types.insert(from_local.this.upcast(), VarData::new_channel(from_var_type, channel_stmt.to.upcast()));
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type(ctx, to_local.parser_type, true);
 
        self.var_types.insert(to_local.this.upcast(), VarData::new_channel(to_var_type, channel_stmt.from.upcast()));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_id = if_stmt.true_body;
 
        let false_body_id = if_stmt.false_body;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, true_body_id)?;
 
        self.visit_stmt(ctx, false_body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
        let while_stmt = &ctx.heap[id];
 

	
 
        let body_id = while_stmt.body;
 
        let test_expr_id = while_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_stmt(ctx, body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        let sync_stmt = &ctx.heap[id];
 
        let body_id = sync_stmt.body;
 

	
 
        self.visit_stmt(ctx, body_id)
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        let return_stmt = &ctx.heap[id];
 
        let expr_id = return_stmt.expression;
 

	
 
        self.visit_expr(ctx, expr_id)
 
    }
 

	
 
    fn visit_assert_stmt(&mut self, ctx: &mut Ctx, id: AssertStatementId) -> VisitorResult {
 
        let assert_stmt = &ctx.heap[id];
 
        let test_expr_id = assert_stmt.expression;
 

	
 
        self.visit_expr(ctx, test_expr_id)
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        let new_stmt = &ctx.heap[id];
 
        let call_expr_id = new_stmt.expression;
 

	
 
        self.visit_call_expr(ctx, call_expr_id)
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_stmt = &ctx.heap[id];
 
        let subexpr_id = expr_stmt.expression;
 

	
 
        self.visit_expr(ctx, subexpr_id)
 
    }
 

	
 
    // Expressions
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let assign_expr = &ctx.heap[id];
 
        let left_expr_id = assign_expr.left;
 
        let right_expr_id = assign_expr.right;
 

	
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.visit_expr(ctx, right_expr_id)?;
 

	
 
        self.progress_assignment_expr(ctx, id)
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let conditional_expr = &ctx.heap[id];
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
        self.expr_types.insert(test_expr_id, InferenceType::new(false, true, vec![InferenceTypePart::Bool]));
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_expr(ctx, true_expr_id)?;
 
        self.visit_expr(ctx, false_expr_id)?;
 

	
 
        self.progress_conditional_expr(ctx, id)
 
    }
 

	
 
    fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let binary_expr = &ctx.heap[id];
 
        let lhs_expr_id = binary_expr.left;
 
        let rhs_expr_id = binary_expr.right;
 

	
 
        self.visit_expr(ctx, lhs_expr_id)?;
 
        self.visit_expr(ctx, rhs_expr_id)?;
 

	
 
        self.progress_binary_expr(ctx, id)
 
    }
 

	
 
    fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let unary_expr = &ctx.heap[id];
 
        let arg_expr_id = unary_expr.expression;
 

	
 
        self.visit_expr(ctx, arg_expr_id)?;
 

	
 
        self.progress_unary_expr(ctx, id)
 
    }
 

	
 
    fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let indexing_expr = &ctx.heap[id];
 
        let subject_expr_id = indexing_expr.subject;
 
        let index_expr_id = indexing_expr.index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, index_expr_id)?;
 

	
 
        self.progress_indexing_expr(ctx, id)
 
    }
 

	
 
    fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let slicing_expr = &ctx.heap[id];
 
        let subject_expr_id = slicing_expr.subject;
 
        let from_expr_id = slicing_expr.from_index;
 
        let to_expr_id = slicing_expr.to_index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, from_expr_id)?;
 
        self.visit_expr(ctx, to_expr_id)?;
 

	
 
        self.progress_slicing_expr(ctx, id)
 
    }
 

	
 
    fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let select_expr = &ctx.heap[id];
 
        let subject_expr_id = select_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_select_expr(ctx, id)
 
    }
 

	
 
    fn visit_array_expr(&mut self, ctx: &mut Ctx, id: ArrayExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let array_expr = &ctx.heap[id];
 
        // TODO: @performance
 
        for element_id in array_expr.elements.clone().into_iter() {
 
            self.visit_expr(ctx, element_id)?;
 
        }
 

	
 
        self.progress_array_expr(ctx, id)
 
    }
 

	
 
    fn visit_constant_expr(&mut self, ctx: &mut Ctx, id: ConstantExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.progress_constant_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // TODO: @performance
 
        let call_expr = &ctx.heap[id];
 
        for arg_expr_id in call_expr.arguments.clone() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let var_data = self.var_types.get_mut(var_expr.declaration.as_ref().unwrap()).unwrap();
 
        var_data.used_at.push(upcast_id);
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
macro_rules! debug_assert_expr_ids_unique_and_known {
 
    // Base case for a single expression ID
 
    ($resolver:ident, $id:ident) => {
 
        if cfg!(debug_assertions) {
 
            debug_assert!($resolver.expr_types.contains_key(&$id));
 
        }
 
    };
 
    // Base case for two expression IDs
 
    ($resolver:ident, $id1:ident, $id2:ident) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2);
 
    };
 
    // Generic case
 
    ($resolver:ident, $id1:ident, $id2:ident, $($tail:ident),+) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2, $($tail),+);
 
    };
 
}
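
// Illustrative expansion: `debug_assert_expr_ids_unique_and_known!(self, a, b, c)`
// asserts that adjacent IDs differ (`a != b`, `b != c`) and looks up each ID in
// `self.expr_types`; all checks compile away when debug assertions are disabled.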
 

	
 
macro_rules! debug_assert_ptrs_distinct {
 
    // Base case
 
    ($ptr1:ident, $ptr2:ident) => {
 
        debug_assert!(!std::ptr::eq($ptr1, $ptr2));
 
    };
 
    // Generic case
 
    ($ptr1:ident, $ptr2:ident, $($tail:ident),+) => {
 
        debug_assert_ptrs_distinct!($ptr1, $ptr2);
 
        debug_assert_ptrs_distinct!($ptr2, $($tail),+);
 
    };
 
}
 

	
 
impl TypeResolvingVisitor {
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError2> {
 
        // Keep inferring until we can no longer make any progress
 
        while let Some(next_expr_id) = self.expr_queued.iter().next() {
 
            let next_expr_id = *next_expr_id;
 
            self.expr_queued.remove(&next_expr_id);
 
            self.progress_expr(ctx, next_expr_id)?;
 
        }
 

	
 
        // Should have inferred everything. Check for this and optionally
 
        // auto-infer the remaining types
 
        for (expr_id, expr_type) in self.expr_types.iter_mut() {
 
            if !expr_type.is_done {
 
                // Auto-infer numberlike/integerlike types to a regular int
 
                if expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    expr_type.parts[0] = InferenceTypePart::Int;
 
                } else {
 
                    let expr = &ctx.heap[*expr_id];
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, expr.position(),
 
                        &format!(
 
                            "Could not fully infer the type of this expression (got '{}')",
 
                            expr_type.display_name(&ctx.heap)
 
                        )
 
                    ))
 
                }
 
            }
 

	
 
            let concrete_type = ctx.heap[*expr_id].get_type_mut();
 
            expr_type.write_concrete_type(concrete_type);
 
        }
 

	
 
        // Check all things we need to monomorphize
 
        // TODO: Struct/enum/union monomorphization
 
        for (call_expr_id, extra_data) in self.extra_data.iter() {
 
            if extra_data.poly_vars.is_empty() { continue; }
 

	
 
            // We have a polymorph
 
            // Retrieve polymorph variable specification
 
            let mut monomorph_types = Vec::with_capacity(extra_data.poly_vars.len());
 
            for (poly_idx, poly_type) in extra_data.poly_vars.iter().enumerate() {
 
                if !poly_type.is_done {
 
                    // TODO: Single clean function for function signatures and polyvars.
 
                    // TODO: Better error message
 
                    let expr = &ctx.heap[*call_expr_id];
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, expr.position(),
 
                        &format!(
 
                            "Could not fully infer the type of polymorphic variable {} of this expression (got '{}')",
 
                            poly_idx, poly_type.display_name(&ctx.heap)
 
                        )
 
                    ))
 
                }
 

	
 
                let mut concrete_type = ConcreteType::default();
 
                poly_type.write_concrete_type(&mut concrete_type);
 
                monomorph_types.insert(poly_idx, concrete_type);
 
            }
 

	
 
            // Resolve to call expression's definition
 
            let call_expr = if let Expression::Call(call_expr) = &ctx.heap[*call_expr_id] {
 
                call_expr
 
            } else {
 
                todo!("implement different kinds of polymorph expressions");
 
            };
 

	
 
            // Add to type table if not yet typechecked
 
            if let Method::Symbolic(symbolic) = &call_expr.method {
 
                let definition_id = symbolic.definition.unwrap();
 
                if !ctx.types.has_monomorph(&definition_id, &monomorph_types) {
 
                    let root_id = ctx.types
 
                        .get_base_definition(&definition_id)
 
                        .unwrap()
 
                        .ast_root;
 

	
 
                    // Pre-emptively add the monomorph to the type table, but
 
                    // we still need to perform typechecking on it
 
                    ctx.types.add_monomorph(&definition_id, monomorph_types.clone());
 
                    queue.push(ResolveQueueElement {
 
                        root_id,
 
                        definition_id,
 
                        monomorph_types,
 
                    })
 
                }
 
            }
 
        }
 

	
 
        // Finally, if the currently resolved definition is a monomorph, then we
 
        // add it to the type table
 
        if !self.poly_vars.is_empty() {
 
            let definition_id = match &self.definition_type {
 
                DefinitionType::None => unreachable!(),
 
                DefinitionType::Function(id) => id.upcast(),
 
                DefinitionType::Component(id) => id.upcast(),
 
            };
 
            ctx.types.instantiate_monomorph(&definition_id, &self.poly_vars)
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, id: ExpressionId) -> Result<(), ParseError2> {
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Array(expr) => {
 
                let id = expr.this;
 
                self.progress_array_expr(ctx, id)
 
            },
 
            Expression::Constant(expr) => {
 
                let id = expr.this;
 
                self.progress_constant_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError2> {
 
        use AssignmentOperator as AO;
 

	
 
        // TODO: Assignable check
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let progress_base = match expr.operation {
 
            AO::Set =>
 
                false,
 
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
 
                self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?,
 
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
 
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
 
                self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?,
 
        };
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_base || progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        if progress_base || progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
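
    // Informal reading of the rule above: for a compound arithmetic assignment
    // the assignment expression, its left-hand side and its right-hand side
    // must all end up with the same (number- or integerlike) type, which is
    // exactly what the combination of the forced constraint and
    // `apply_equal3_constraint` expresses.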
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError2> {
 
        // Note: test expression type is already enforced
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.true_expression;
 
        let arg2_expr_id = expr.false_expression;
 

	
 
        debug_log!("Conditional expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> Result<(), ParseError2> {
 
        // Note: our expression type might be fixed by our parent, but we still
 
        // need to make sure it matches the type associated with our operation.
 
        use BinaryOperator as BO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_id = expr.left;
 
        let arg2_id = expr.right;
 

	
 
        debug_log!("Binary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = match expr.operation {
 
            BO::Concatenate => {
 
                // Arguments may be arrays/slices, output is always an array
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &ARRAYLIKE_TEMPLATE)?;
 

	
 
                // If they're all arraylike, then we want the subtype to match
 
                let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 1)?;
 

	
 
                (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
            },
 
            BO::LogicalOr | BO::LogicalAnd => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => {
 
                // All equal of integer type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
            BO::Equality | BO::Inequality => {
 
                // Equal2 on args, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::LessThan | BO::GreaterThan | BO::LessThanEqual | BO::GreaterThanEqual => {
 
                // Equal2 on args with numberlike type, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg_base = self.apply_forced_constraint(ctx, arg1_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg_base || progress_arg1, progress_arg_base || progress_arg2)
 
            },
 
            BO::Add | BO::Subtract | BO::Multiply | BO::Divide => {
 
                // All equal of number type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
@@ -1507,1173 +1562,1250 @@ impl TypeResolvingVisitor {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg_id = expr.expression;
 

	
 
        debug_log!("Unary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg  type: {}", self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg) = match expr.operation {
 
            UO::Positive | UO::Negative => {
 
                // Equal types of numeric class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::BitwiseNot | UO::PreIncrement | UO::PreDecrement | UO::PostIncrement | UO::PostDecrement => {
 
                // Equal types of integer class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::LogicalNot => {
 
                // Both booleans
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg = self.apply_forced_constraint(ctx, arg_id, &BOOL_TEMPLATE)?;
 
                (progress_expr, progress_arg)
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg  type [{}]: {}", progress_arg, self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg { self.queue_expr(arg_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let index_id = expr.index;
 

	
 
        debug_log!("Indexing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type: {}", self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure subject is arraylike and index is integerlike
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_index = self.apply_forced_constraint(ctx, index_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type [{}]: {}", progress_index, self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_index { self.queue_expr(index_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let from_id = expr.from_index;
 
        let to_id = expr.to_index;
 

	
 
        debug_log!("Slicing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type: {}", self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type: {}", self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure the subject is arraylike and both indices share the same integerlike type
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_idx_base = self.apply_forced_constraint(ctx, from_id, &INTEGERLIKE_TEMPLATE)?;
 
        let (progress_from, progress_to) = self.apply_equal2_constraint(ctx, upcast_id, from_id, 0, to_id, 0)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type [{}]: {}", progress_idx_base || progress_from, self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type [{}]: {}", progress_idx_base || progress_to, self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_idx_base || progress_from { self.queue_expr(from_id); }
 
        if progress_idx_base || progress_to { self.queue_expr(to_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 

	
 
        debug_log!("Select expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_subject, progress_expr) = match &expr.field {
 
            Field::Length => {
 
                let progress_subject = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                (progress_subject, progress_expr)
 
            },
 
            Field::Symbolic(_field) => {
 
                todo!("implement select expr for symbolic fields");
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_subject { self.queue_expr(subject_id); }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_array_expr(&mut self, ctx: &mut Ctx, id: ArrayExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let expr_elements = expr.elements.clone(); // TODO: @performance
 

	
 
        debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // All elements should have an equal type
 
        let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
 
        let mut any_progress = false;
 
        for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
 
            if *progress_arg {
 
                any_progress = true;
 
                self.queue_expr(*arg_id);
 
            }
 
        }
 

	
 
        // And the output should be an array of the element types
 
        let mut expr_progress = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
        if !expr_elements.is_empty() {
 
            let first_arg_id = expr_elements[0];
 
            let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
 
                ctx, upcast_id, upcast_id, 1, first_arg_id, 0
 
            )?;
 

	
 
            expr_progress = expr_progress || inner_expr_progress;
 

	
 
            // Note that if the array type progressed the type of the arguments,
 
            // then we should enqueue this progression function again
 
            // TODO: @fix Make apply_equal_n accept a start idx as well
 
            if arg_progress { self.queue_expr(upcast_id); }
 
        }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type [{}]: {}", expr_progress, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if expr_progress { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_constant_expr(&mut self, ctx: &mut Ctx, id: ConstantExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let template = match &expr.value {
 
            Constant::Null => &MESSAGE_TEMPLATE[..],
 
            Constant::Integer(_) => &INTEGERLIKE_TEMPLATE[..],
 
            Constant::True | Constant::False => &BOOL_TEMPLATE[..],
 
            Constant::Character(_) => todo!("character literals")
 
        };
 

	
 
        let progress = self.apply_forced_constraint(ctx, upcast_id, template)?;
 
        if progress { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
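
    // Note: integer literals only receive the weak `IntegerLike` constraint
    // here; if nothing else pins down a concrete width, `resolve_types` falls
    // back to a plain int at the end of inference.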
 

	
 
    // TODO: @cleanup, see how this can be cleaned up once I implement
 
    //  polymorphic struct/enum/union literals. These likely follow the same
 
    //  pattern as here.
 
    fn progress_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 

	
 
        debug_log!("Call expr '{}': {}", match &expr.method {
 
            Method::Create => String::from("create"),
 
            Method::Fires => String::from("fires"),
 
            Method::Get => String::from("get"),
 
            Method::Put => String::from("put"),
 
            Method::Symbolic(method) => String::from_utf8_lossy(&method.identifier.value).to_string()
 
        },upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 
        debug_log!(" * During (inferring types from arguments and return type):");
 

	
 
        // Check if we can make progress using the arguments and/or return types
 
        // while keeping track of the polyvars we've extended
 
        let mut poly_progress = HashSet::new();
 
        debug_assert_eq!(extra.embedded.len(), expr.arguments.len());
 
        let mut poly_infer_error = false;
 

	
 
        for (arg_idx, arg_id) in expr.arguments.clone().into_iter().enumerate() {
 
            let signature_type = &mut extra.embedded[arg_idx];
 
            let argument_type: *mut _ = self.expr_types.get_mut(&arg_id).unwrap();
 
            let (progress_sig, progress_arg) = Self::apply_equal2_signature_constraint(
 
                ctx, upcast_id, Some(arg_id), signature_type, 0, argument_type, 0
 
            )?;
 

	
 
            debug_log!("   - Arg {} type | sig: {}, arg: {}", arg_idx, signature_type.display_name(&ctx.heap), unsafe{&*argument_type}.display_name(&ctx.heap));
 

	
 
            if progress_sig {
 
                // Progressed signature, so also apply inference to the 
 
                // polymorph types using the markers 
 
                debug_assert!(signature_type.has_body_marker, "progress on signature argument type without markers");
 
                for (poly_idx, poly_section) in signature_type.body_marker_iter() {
 
                    let polymorph_type = &mut extra.poly_vars[poly_idx];
 
                    match Self::apply_forced_constraint_types(
 
                        polymorph_type, 0, poly_section, 0
 
                    ) {
 
                        Ok(true) => { poly_progress.insert(poly_idx); },
 
                        Ok(false) => {},
 
                        Err(()) => { poly_infer_error = true; }
 
                    }
 

	
 
                    debug_log!("   - Poly {} type | sig: {}, arg: {}", poly_idx, polymorph_type.display_name(&ctx.heap), InferenceType::partial_display_name(&ctx.heap, poly_section));
 
                }
 
            }
 
            if progress_arg {
 
                // Progressed argument expression
 
                self.expr_queued.insert(arg_id);
 
            }
 
        }
 

	
 
        // Do the same for the return type
 
        let signature_type = &mut extra.returned;
 
        let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
        let (progress_sig, progress_expr) = Self::apply_equal2_signature_constraint(
 
            ctx, upcast_id, None, signature_type, 0, expr_type, 0
 
        )?;
 

	
 
        debug_log!("   - Ret type | sig: {}, arg: {}", signature_type.display_name(&ctx.heap), unsafe{&*expr_type}.display_name(&ctx.heap));
 

	
 
        if progress_sig {
 
            // As above: apply inference to polyargs as well
 
            debug_assert!(signature_type.has_body_marker, "progress on signature return type without markers");
 
            for (poly_idx, poly_section) in signature_type.body_marker_iter() {
 
                let polymorph_type = &mut extra.poly_vars[poly_idx];
 
                match Self::apply_forced_constraint_types(
 
                    polymorph_type, 0, poly_section, 0
 
                ) {
 
                    Ok(true) => { poly_progress.insert(poly_idx); },
 
                    Ok(false) => {},
 
                    Err(()) => { poly_infer_error = true; }
 
                }
 
                debug_log!("   - Poly {} type | sig: {}, arg: {}", poly_idx, polymorph_type.display_name(&ctx.heap), InferenceType::partial_display_name(&ctx.heap, poly_section));
 
            }
 
        }
 
        if progress_expr {
 
            if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                self.expr_queued.insert(parent_id);
 
            }
 
        }
 

	
 
        // If we had an error in the polymorphic variable's inference, then we
 
        // need to provide a human readable error: find a pair of inference
 
        // types in the arguments/return type that do not agree on the
 
        // polymorphic variable's type
 
        if poly_infer_error { return Err(self.construct_poly_arg_error(ctx, id)) }
 

	
 
        // If we did not have an error in the polymorph inference above, then
 
        // reapplying the polymorph type to each argument type and the return
 
        // type should always succeed.
 
        debug_log!(" * During (reinferring from progress polyvars):");
 
        // TODO: @performance If the algorithm is changed to be more "on demand
 
        //  argument re-evaluation", instead of "all-argument re-evaluation",
 
        //  then this is no longer true
 
        for poly_idx in poly_progress.into_iter() {
 
            // For each polymorphic argument: first extend the signature type,
 
            // then reapply the equal2 constraint to the expressions
 
            let poly_type = &extra.poly_vars[poly_idx];
 
            for (arg_idx, sig_type) in extra.embedded.iter_mut().enumerate() {
 
                let mut seek_idx = 0;
 
                let mut modified_sig = false;
 
                while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_part(
 
                    InferenceTypePart::MarkerBody(poly_idx), seek_idx
 
                ) {
 
                    let modified_at_marker = Self::apply_forced_constraint_types(
 
                        sig_type, start_idx, &poly_type.parts, 0
 
                    ).unwrap();
 
                    modified_sig = modified_sig || modified_at_marker;
 
                    seek_idx = end_idx;
 
                }
 

	
 
                if !modified_sig {
 
                    debug_log!("   - Poly {} | Arg {} type | signature has not changed", poly_idx, arg_idx);
 
                    continue;
 
                }
 

	
 
                // Part of signature was modified, so update expression used as
 
                // argument as well
 
                let arg_expr_id = expr.arguments[arg_idx];
 
                let arg_type: *mut _ = self.expr_types.get_mut(&arg_expr_id).unwrap();
 
                let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                    ctx, arg_expr_id, Some(arg_expr_id), sig_type, 0, arg_type, 0
 
                ).expect("no inference error at argument type");
 
                if progress_arg { self.expr_queued.insert(arg_expr_id); }
 
                debug_log!("   - Poly {} | Arg {} type | sig: {}, arg: {}", poly_idx, arg_idx, sig_type.display_name(&ctx.heap), unsafe{&*arg_type}.display_name(&ctx.heap));
 
            }
 

	
 
            // Again: do the same for the return type
 
            let sig_type = &mut extra.returned;
 
            let mut seek_idx = 0;
 
            let mut modified_sig = false;
 
            while let Some((start_idx, end_idx)) = sig_type.find_subtree_idx_for_part(
 
                InferenceTypePart::MarkerBody(poly_idx), seek_idx
 
            ) {
 
                let modified_at_marker = Self::apply_forced_constraint_types(
 
                    sig_type, start_idx, &poly_type.parts, 0
 
                ).unwrap();
 
                modified_sig = modified_sig || modified_at_marker;
 
                seek_idx = end_idx;
 
            }
 

	
 
            if modified_sig {
 
                let ret_type = self.expr_types.get_mut(&upcast_id).unwrap();
 
                let (_, progress_ret) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, sig_type, 0, ret_type, 0
 
                ).expect("no inference error at return type");
 
                if progress_ret {
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 
                debug_log!("   - Poly {} | Ret type | sig: {}, arg: {}", poly_idx, sig_type.display_name(&ctx.heap), ret_type.display_name(&ctx.heap));
 
            } else {
 
                debug_log!("   - Poly {} | Ret type | signature has not changed", poly_idx);
 
            }
 
        }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        Ok(())
 
    }
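
    // Summary of the call rule above: (1) unify each argument and the return
    // expression with the corresponding signature type, (2) collect whatever
    // progress that yields for the polymorphic variables via the body markers,
    // and (3) re-apply the progressed polymorphic variables to the signature
    // and from there back onto the argument/return expressions, queueing every
    // expression whose type changed.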
 

	
 
    fn progress_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> Result<(), ParseError2> {
 
        let upcast_id = id.upcast();
 
        let var_expr = &ctx.heap[id];
 
        let var_id = var_expr.declaration.unwrap();
 

	
 
        debug_log!("Variable expr '{}': {}", &String::from_utf8_lossy(&ctx.heap[var_id].identifier().value), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Var  type: {}", self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Retrieve shared variable type and expression type and apply inference
 
        let var_data = self.var_types.get_mut(&var_id).unwrap();
 
        let expr_type = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            &mut var_data.var_type as *mut _, 0, expr_type, 0
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            let var_decl = &ctx.heap[var_id];
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, var_decl.position(),
 
                &format!(
 
                    "Conflicting types for this variable, previously assigned the type '{}'",
 
                    var_data.var_type.display_name(&ctx.heap)
 
                )
 
            ).with_postfixed_info(
 
                &ctx.module.source, var_expr.position,
 
                &format!(
 
                    "But inferred to have incompatible type '{}' here",
 
                    expr_type.display_name(&ctx.heap)
 
                )
 
            ))
 
        }
 

	
 
        let progress_var = infer_res.modified_lhs();
 
        let progress_expr = infer_res.modified_rhs();
 

	
 
        if progress_var {
 
            // Let other variable expressions using this type progress as well
 
            for other_expr in var_data.used_at.iter() {
 
                if *other_expr != upcast_id {
 
                    self.expr_queued.insert(*other_expr);
 
                }
 
            }
 

	
 
            // Let a linked port know that our type has updated
 
            if let Some(linked_id) = var_data.linked_var {
 
                // Only perform one-way inference to prevent updating our own
                // type, since that would lead to an inconsistency
 
                let var_type: *mut _ = &mut var_data.var_type;
 
                let mut link_data = self.var_types.get_mut(&linked_id).unwrap();
 

	
 
                debug_assert!(
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Input ||
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Output
 
                );
 
                debug_assert!(
 
                    link_data.var_type.parts[0] == InferenceTypePart::Input ||
 
                    link_data.var_type.parts[0] == InferenceTypePart::Output
 
                );
 
                match InferenceType::infer_subtree_for_single_type(&mut link_data.var_type, 1, &unsafe{&*var_type}.parts, 1) {
 
                    SingleInferenceResult::Modified => {
 
                        for other_expr in &link_data.used_at {
 
                            self.expr_queued.insert(*other_expr);
 
                        }
 
                    },
 
                    SingleInferenceResult::Unmodified => {},
 
                    SingleInferenceResult::Incompatible => {
 
                        let var_data = self.var_types.get(&var_id).unwrap();
 
                        let link_data = self.var_types.get(&linked_id).unwrap();
 
                        let var_decl = &ctx.heap[var_id];
 
                        let link_decl = &ctx.heap[linked_id];
 

	
 
                        return Err(ParseError2::new_error(
 
                            &ctx.module.source, var_decl.position(),
 
                            &format!(
 
                                "Conflicting types for this variable, assigned the type '{}'",
 
                                var_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ).with_postfixed_info(
 
                            &ctx.module.source, link_decl.position(),
 
                            &format!(
 
                                "Because it is incompatible with this variable, assigned the type '{}'",
 
                                link_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ));
 
                    }
 
                }
 
            }
 
        }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Var  type [{}]: {}", progress_var, self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        Ok(())
 
    }
 

	
 
    fn queue_expr_parent(&mut self, ctx: &Ctx, expr_id: ExpressionId) {
 
        if let ExpressionParent::Expression(parent_expr_id, _) = &ctx.heap[expr_id].parent() {
 
            self.expr_queued.insert(*parent_expr_id);
 
        }
 
    }
 

	
 
    fn queue_expr(&mut self, expr_id: ExpressionId) {
 
        self.expr_queued.insert(expr_id);
 
    }
 

	
 
    /// Applies a forced type constraint: the type associated with the supplied
 
    /// expression will be molded into the provided "template". The template may
 
    /// be fully specified (e.g. a bool) or contain "inference" variables (e.g.
 
    /// an array of T)
 
    fn apply_forced_constraint(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> Result<bool, ParseError2> {
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id);
 
        let expr_type = self.expr_types.get_mut(&expr_id).unwrap();
 
        match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(
 
                self.construct_template_type_error(ctx, expr_id, template)
 
            )
 
        }
 
    }
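
    // For instance (illustrative): the template may be fully specified, such as
    // [InferenceTypePart::Bool] for a boolean context, or partially specified,
    // such as [Array, Unknown] for "an array of some T". Molding either onto an
    // expression type that still contains Unknown parts reports Modified, while
    // a genuinely conflicting expression type reports Incompatible and is turned
    // into a template type error above.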
 

	
 
    fn apply_forced_constraint_types(
 
        to_infer: *mut InferenceType, to_infer_start_idx: usize,
 
        template: &[InferenceTypePart], template_start_idx: usize
 
    ) -> Result<bool, ()> {
 
        match InferenceType::infer_subtree_for_single_type(
 
            unsafe{ &mut *to_infer }, to_infer_start_idx,
 
            template, template_start_idx
 
        ) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(()),
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects the two provided types to be
 
    /// equal. We attempt to make progress in inferring the types. If the call
 
    /// is successful then the composition of all types is made equal.
 
    /// The "parent" `expr_id` is provided to construct errors.
 
    fn apply_equal2_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg1_start_idx: usize,
 
        arg2_id: ExpressionId, arg2_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError2> {
 
        debug_assert_expr_ids_unique_and_known!(self, arg1_id, arg2_id);
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            arg1_type, arg1_start_idx,
 
            arg2_type, arg2_start_idx
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
 

	
 
 
    fn apply_equal2_signature_constraint(
 
        ctx: &Ctx, outer_expr_id: ExpressionId, expr_id: Option<ExpressionId>,
 
        signature_type: *mut InferenceType, signature_start_idx: usize,
 
        expression_type: *mut InferenceType, expression_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError2> {
 
        debug_assert_ptrs_distinct!(signature_type, expression_type);
 
        let infer_res = unsafe { 
 
            InferenceType::infer_subtrees_for_both_types(
 
                signature_type, signature_start_idx,
 
                expression_type, expression_start_idx
 
            ) 
 
        };
 

	
 
        if infer_res == DualInferenceResult::Incompatible {
 
            // TODO: Check if I still need to use this
 
            let outer_position = ctx.heap[outer_expr_id].position();
 
            let (position_name, position) = match expr_id {
 
                Some(expr_id) => ("argument's", ctx.heap[expr_id].position()),
 
                None => ("return type's", outer_position)
 
            };
 
            let (signature_display_type, expression_display_type) = unsafe { (
 
                (&*signature_type).display_name(&ctx.heap),
 
                (&*expression_type).display_name(&ctx.heap)
 
            ) };
 

	
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, outer_position,
 
                "Failed to fully resolve the types of this expression"
 
            ).with_postfixed_info(
 
                &ctx.module.source, position,
 
                &format!(
 
                    "Because the {} signature has been resolved to '{}', but the expression has been resolved to '{}'",
 
                    position_name, signature_display_type, expression_display_type
 
                )
 
            ));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
 

	
 
    /// Applies a type constraint that expects all three provided types to be
 
    /// equal. In case we can make progress in inferring the types then we
 
    /// attempt to do so. If the call is successful then the composition of all
 
    /// types is made equal.
 
    fn apply_equal3_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId,
 
        start_idx: usize
 
    ) -> Result<(bool, bool, bool), ParseError2> {
 
        // Safety: all expression IDs are always distinct, and we do not modify
 
        //  the container
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id, arg1_id, arg2_id);
 
        let expr_type: *mut _ = self.expr_types.get_mut(&expr_id).unwrap();
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let expr_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
 
        };
 
        if expr_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
 
        }
 

	
 
        let args_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) };
 
        if args_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        // If all types are compatible, but the second call caused the arg1_type
 
        // to be expanded, then we must also assign this to expr_type.
 
        let mut progress_expr = expr_res.modified_lhs();
 
        let mut progress_arg1 = expr_res.modified_rhs();
 
        let progress_arg2 = args_res.modified_rhs();
 

	
 
        if args_res.modified_lhs() { 
 
            unsafe {
 
                let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
 
                let subtree = &((*arg2_type).parts[start_idx..end_idx]);
 
                (*expr_type).replace_subtree(start_idx, subtree);
 
            }
 
            progress_expr = true;
 
            progress_arg1 = true;
 
        }
 

	
 
        Ok((progress_expr, progress_arg1, progress_arg2))
 
    }
 

	
 
    // TODO: @optimize Since we only deal with a single type this might be done
 
    //  a lot more efficiently, methinks (disregarding the allocations here)
 
    fn apply_equal_n_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
 
    ) -> Result<Vec<bool>, ParseError2> {
 
        // Early exit
 
        match args.len() {
 
            0 => return Ok(vec!()),         // nothing to progress
 
            1 => return Ok(vec![false]),    // only one type, so nothing to infer
 
            _ => {}
 
        }
 

	
 
        let mut progress = Vec::new();
 
        progress.resize(args.len(), false);
 

	
 
        // Do pairwise inference, keep track of the last entry we made progress
 
        // on. Once done we need to update everything to the most-inferred type.
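        // For example (illustrative): with three arguments typed [Unknown],
        // [Array, Unknown] and [Array, Int], the pairwise passes infer the
        // first against the second and the second against the third, leaving
        // the first argument without the Int element type; the loop below then
        // copies the fully inferred type back onto it.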
 
        let mut arg_iter = args.iter();
 
        let mut last_arg_id = *arg_iter.next().unwrap();
 
        let mut last_lhs_progressed = 0;
 
        let mut lhs_arg_idx = 0;
 

	
 
        while let Some(next_arg_id) = arg_iter.next() {
 
            let arg1_type: *mut _ = self.expr_types.get_mut(&last_arg_id).unwrap();
 
            let arg2_type: *mut _ = self.expr_types.get_mut(next_arg_id).unwrap();
 

	
 
            let res = unsafe {
 
                InferenceType::infer_subtrees_for_both_types(arg1_type, 0, arg2_type, 0)
 
            };
 

	
 
            if res == DualInferenceResult::Incompatible {
 
                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
 
            }
 

	
 
            if res.modified_lhs() {
 
                // We re-inferred something on the left hand side, so everything
 
                // up until now should be re-inferred.
 
                progress[lhs_arg_idx] = true;
 
                last_lhs_progressed = lhs_arg_idx;
 
            }
 
            progress[lhs_arg_idx + 1] = res.modified_rhs();
 

	
 
            last_arg_id = *next_arg_id;
 
            lhs_arg_idx += 1;
 
        }
 

	
 
        // Re-infer everything. Note that we do not need to re-infer the type
 
        // exactly at `last_lhs_progressed`, but only everything up to it.
 
        let last_type: *mut _ = self.expr_types.get_mut(args.last().unwrap()).unwrap();
 
        for arg_idx in 0..last_lhs_progressed {
 
            let arg_type: *mut _ = self.expr_types.get_mut(&args[arg_idx]).unwrap();
 
            unsafe{
 
                (*arg_type).replace_subtree(0, &(*last_type).parts);
 
            }
 
            progress[arg_idx] = true;
 
        }
 

	
 
        Ok(progress)
 
    }
 

	
 
    /// Determines the `InferenceType` for the expression based on the
 
    /// expression parent. Note that if the parent is another expression, we do
 
    /// not take special action; instead we let parent expressions fix the type
 
    /// of subexpressions before they have a chance to call this function.
 
    /// Hence: if the expression type is already set, this function doesn't do
 
    /// anything.
 
    fn insert_initial_expr_inference_type(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId
 
    ) -> Result<(), ParseError2> {
 
        use ExpressionParent as EP;
 
        use InferenceTypePart as ITP;
 

	
 
        let expr = &ctx.heap[expr_id];
 
        let inference_type = match expr.parent() {
 
            EP::None =>
 
                // Should have been set by linker
 
                unreachable!(),
 
            EP::ExpressionStmt(_) | EP::Expression(_, _) =>
 
                // Determined during type inference
 
                InferenceType::new(false, false, vec![ITP::Unknown]),
 
            EP::If(_) | EP::While(_) | EP::Assert(_) =>
 
                // Must be a boolean
 
                InferenceType::new(false, true, vec![ITP::Bool]),
 
            EP::Return(_) =>
 
                // Must match the return type of the function
 
                if let DefinitionType::Function(func_id) = self.definition_type {
 
                    let return_parser_type_id = ctx.heap[func_id].return_type;
 
                    self.determine_inference_type_from_parser_type(ctx, return_parser_type_id, true)
 
                } else {
 
                    // Cannot happen: definition always set upon body traversal
 
                    // and "return" calls in components are illegal.
 
                    unreachable!();
 
                },
 
            EP::New(_) =>
 
                // Must be a component call, which we assign a "Void" return
 
                // type
 
                InferenceType::new(false, true, vec![ITP::Void]),
 
        };
 

	
 
        match self.expr_types.entry(expr_id) {
 
            Entry::Vacant(vacant) => {
 
                vacant.insert(inference_type);
 
            },
 
            Entry::Occupied(mut preexisting) => {
 
                // We already have an entry, this happens if our parent fixed
 
                // our type (e.g. we're used in a conditional expression's test)
 
                // but we have a different type.
 
                // TODO: Is this ever called? Seems like it can't
 
                debug_assert!(false, "I am actually called, my ID is {}", expr_id.index);
 
                let old_type = preexisting.get_mut();
 
                if let SingleInferenceResult::Incompatible = InferenceType::infer_subtree_for_single_type(
 
                    old_type, 0, &inference_type.parts, 0
 
                ) {
 
                    return Err(self.construct_expr_type_error(ctx, expr_id, expr_id))
 
                }
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn insert_initial_call_polymorph_data(
 
        &mut self, ctx: &mut Ctx, call_id: CallExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 

	
 
        // Note: the polymorph variables may be partially specified and may
 
        // contain references to the wrapping definition's (i.e. the proctype
 
        // we are currently visiting) polymorphic arguments.
 
        //
 
        // The arguments of the call may refer to polymorphic variables in the
 
        // definition of the function we're calling, not of the wrapping
 
        // definition. We insert markers in these inferred types to be able to
 
        // map them back and forth to the polymorphic arguments of the function
 
        // we are calling.
 
        let call = &ctx.heap[call_id];
 

	
 
        // Handle the polymorphic variables themselves
 
        let mut poly_vars = Vec::with_capacity(call.poly_args.len());
 
        for poly_arg_type_id in call.poly_args.clone() { // TODO: @performance
 
            poly_vars.push(self.determine_inference_type_from_parser_type(ctx, poly_arg_type_id, true));
 
        }
 

	
 
        // Handle the arguments
 
        // TODO: @cleanup: Maybe factor this out for reuse in the validator/linker, should also
 
        //  make the code slightly more robust.
 
        let (embedded_types, return_type) = match &call.method {
 
            Method::Create => {
 
                // Not polymorphic
 
                (
 
                    vec![InferenceType::new(false, true, vec![ITP::Int])],
 
                    InferenceType::new(false, true, vec![ITP::Message, ITP::Byte])
 
                )
 
            },
 
            Method::Fires => {
 
                // bool fires<T>(PortLike<T> arg)
 
                (
 
                    vec![InferenceType::new(true, false, vec![ITP::PortLike, ITP::MarkerBody(0), ITP::Unknown])],
 
                    InferenceType::new(false, true, vec![ITP::Bool])
 
                )
 
            },
 
            Method::Get => {
 
                // T get<T>(input<T> arg)
 
                (
 
                    vec![InferenceType::new(true, false, vec![ITP::Input, ITP::MarkerBody(0), ITP::Unknown])],
 
                    InferenceType::new(true, false, vec![ITP::MarkerBody(0), ITP::Unknown])
 
                )
 
            },
 
            Method::Put => {
 
                // void Put<T>(output<T> port, T msg)
 
                (
 
                    vec![
 
                        InferenceType::new(true, false, vec![ITP::Output, ITP::MarkerBody(0), ITP::Unknown]),
 
                        InferenceType::new(true, false, vec![ITP::MarkerBody(0), ITP::Unknown])
 
                    ],
 
                    InferenceType::new(false, true, vec![ITP::Void])
 
                )
 
            }
 
            Method::Symbolic(symbolic) => {
 
                let definition = &ctx.heap[symbolic.definition.unwrap()];
 

	
 
                match definition {
 
                    Definition::Component(definition) => {
 
                        let mut parameter_types = Vec::with_capacity(definition.parameters.len());
 
                        for param_id in definition.parameters.clone() {
 
                            let param = &ctx.heap[param_id];
 
                            let param_parser_type_id = param.parser_type;
 
                            parameter_types.push(self.determine_inference_type_from_parser_type(ctx, param_parser_type_id, false));
 
                        }
 

	
 
                        (parameter_types, InferenceType::new(false, true, vec![InferenceTypePart::Void]))
 
                    },
 
                    Definition::Function(definition) => {
 
                        let mut parameter_types = Vec::with_capacity(definition.parameters.len());
 
                        for param_id in definition.parameters.clone() {
 
                            let param = &ctx.heap[param_id];
 
                            let param_parser_type_id = param.parser_type;
 
                            parameter_types.push(self.determine_inference_type_from_parser_type(ctx, param_parser_type_id, false));
 
                        }
 

	
 
                        let return_type = self.determine_inference_type_from_parser_type(ctx, definition.return_type, false);
 
                        (parameter_types, return_type)
 
                    },
 
                    Definition::Struct(_) | Definition::Enum(_) => {
 
                        unreachable!("insert initial polymorph data for struct/enum");
 
                    }
 
                }
 
            }
 
        };
 

	
 
        self.extra_data.insert(call_id.upcast(), ExtraData {
 
            poly_vars,
 
            embedded: embedded_types,
 
            returned: return_type
 
        });
 
    }
 

	
 
    /// Determines the initial InferenceType from the provided ParserType. This
 
    /// may be called with two kinds of intentions:
 
    /// 1. To resolve a ParserType within the body of a function, or on
 
    ///     polymorphic arguments to calls/instantiations within that body. This
 
    ///     means that the polymorphic variables are known and can be replaced
 
    ///     with the monomorph we're instantiating.
 
    /// 2. To resolve a ParserType on a called function's definition or on
 
    ///     an instantiated datatype's members. This means that the polymorphic
 
    ///     arguments inside those ParserTypes refer to the polymorphic
 
    ///     variables in the called/instantiated type's definition.
 
    /// In the second case we place InferenceTypePart::MarkerBody instances such
 
    /// that we can perform type inference on the polymorphic variables.
 
    fn determine_inference_type_from_parser_type(
 
        &mut self, ctx: &Ctx, parser_type_id: ParserTypeId,
 
        parser_type_in_body: bool
 
    ) -> InferenceType {
 
        use ParserTypeVariant as PTV;
 
        use InferenceTypePart as ITP;
 

	
 
        let mut to_consider = VecDeque::with_capacity(16);
 
        to_consider.push_back(parser_type_id);
 

	
 
        let mut infer_type = Vec::new();
 
        let mut has_inferred = false;
 
        let mut has_markers = false;
 

	
 
        while !to_consider.is_empty() {
 
            let parser_type_id = to_consider.pop_front().unwrap();
 
            let parser_type = &ctx.heap[parser_type_id];
 
            match &parser_type.variant {
 
                PTV::Message => {
                    // TODO: @types Remove the Message -> Byte hack at some point...
 
                    infer_type.push(ITP::Message);
 
                    infer_type.push(ITP::Byte);
 
                },
 
                PTV::Bool => { infer_type.push(ITP::Bool); },
 
                PTV::Byte => { infer_type.push(ITP::Byte); },
 
                PTV::Short => { infer_type.push(ITP::Short); },
 
                PTV::Int => { infer_type.push(ITP::Int); },
 
                PTV::Long => { infer_type.push(ITP::Long); },
 
                PTV::String => { infer_type.push(ITP::String); },
 
                PTV::IntegerLiteral => { unreachable!("integer literal type on variable type"); },
 
                PTV::Inferred => {
 
                    infer_type.push(ITP::Unknown);
 
                    has_inferred = true;
 
                },
 
                PTV::Array(subtype_id) => {
 
                    infer_type.push(ITP::Array);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Input(subtype_id) => {
 
                    infer_type.push(ITP::Input);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Output(subtype_id) => {
 
                    infer_type.push(ITP::Output);
 
                    to_consider.push_front(*subtype_id);
 
                },
 
                PTV::Symbolic(symbolic) => {
 
                    debug_assert!(symbolic.variant.is_some(), "symbolic variant not yet determined");
 
                    match symbolic.variant.as_ref().unwrap() {
 
                        SymbolicParserTypeVariant::PolyArg(_, arg_idx) => {
 
                            // Retrieve concrete type of argument and add it to
 
                            // the inference type.
 
                            let arg_idx = *arg_idx;
 
                            debug_assert!(symbolic.poly_args.is_empty()); // TODO: @hkt
 

	
 
                            if parser_type_in_body {
 
                                // Polymorphic argument refers to definition's
 
                                // polymorphic variables
 
                                debug_assert!(arg_idx < self.poly_vars.len());
 
                                debug_assert!(!self.poly_vars[arg_idx].has_marker());
 
                                infer_type.push(ITP::MarkerDefinition(arg_idx));
 
                                for concrete_part in &self.poly_vars[arg_idx].parts {
 
                                    infer_type.push(ITP::from(*concrete_part));
 
                                }
 
                            } else {
 
                                // Polymorphic argument has to be inferred
 
                                has_markers = true;
 
                                has_inferred = true;
 
                                infer_type.push(ITP::MarkerBody(arg_idx));
 
                                infer_type.push(ITP::Unknown);
 
                            }
 
                        },
 
                        SymbolicParserTypeVariant::Definition(definition_id) => {
 
                            // TODO: @cleanup
 
                            if cfg!(debug_assertions) {
 
                                let definition = &ctx.heap[*definition_id];
 
                                debug_assert!(definition.is_struct() || definition.is_enum()); // TODO: @function_ptrs
 
                                let num_poly = match definition {
 
                                    Definition::Struct(v) => v.poly_vars.len(),
 
                                    Definition::Enum(v) => v.poly_vars.len(),
 
                                    _ => unreachable!(),
 
                                };
 
                                debug_assert_eq!(symbolic.poly_args.len(), num_poly);
 
                            }
 

	
 
                            infer_type.push(ITP::Instance(*definition_id, symbolic.poly_args.len()));
 
                            let mut poly_arg_idx = symbolic.poly_args.len();
 
                            while poly_arg_idx > 0 {
 
                                poly_arg_idx -= 1;
 
                                to_consider.push_front(symbolic.poly_args[poly_arg_idx]);
 
                            }
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        InferenceType::new(has_markers, !has_inferred, infer_type)
 
    }
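
    // Illustrative example of the two cases above, assuming T is the
    // polymorphic variable at index 0 and the parser type describes an input
    // port of T: as a signature type (case 2) this yields the parts
    // [Input, MarkerBody(0), Unknown], while inside a body whose monomorph has
    // T = int (case 1) it yields [Input, MarkerDefinition(0), Int].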
 

	
 
    /// Construct an error when an expression's type does not match. This
 
    /// happens if we infer the expression type from its arguments (e.g. the
 
    /// expression type of an addition operator is the type of the arguments)
 
    /// But the expression type was already set due to our parent (e.g. an
 
    /// "if statement" or a "logical not" always expecting a boolean)
 
    fn construct_expr_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, arg_id: ExpressionId
 
    ) -> ParseError2 {
 
        // TODO: Expand and provide more meaningful information for humans
 
        let expr = &ctx.heap[expr_id];
 
        let arg_expr = &ctx.heap[arg_id];
 
        let expr_type = self.expr_types.get(&expr_id).unwrap();
 
        let arg_type = self.expr_types.get(&arg_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            &format!(
 
                "Incompatible types: this expression expected a '{}'", 
 
                expr_type.display_name(&ctx.heap)
 
            )
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg_expr.position(),
 
            &format!(
 
                "But this expression yields a '{}'",
 
                arg_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_arg_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId
 
    ) -> ParseError2 {
 
        let expr = &ctx.heap[expr_id];
 
        let arg1 = &ctx.heap[arg1_id];
 
        let arg2 = &ctx.heap[arg2_id];
 

	
 
        let arg1_type = self.expr_types.get(&arg1_id).unwrap();
 
        let arg2_type = self.expr_types.get(&arg2_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            "Incompatible types: cannot apply this expression"
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg1.position(),
 
            &format!(
 
                "Because this expression has type '{}'",
 
                arg1_type.display_name(&ctx.heap)
 
            )
 
        ).with_postfixed_info(
 
            &ctx.module.source, arg2.position(),
 
            &format!(
 
                "But this expression has type '{}'",
 
                arg2_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_template_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> ParseError2 {
 
        let expr = &ctx.heap[expr_id];
 
        let expr_type = self.expr_types.get(&expr_id).unwrap();
 

	
 
        return ParseError2::new_error(
 
            &ctx.module.source, expr.position(),
 
            &format!(
 
                "Incompatible types: got a '{}' but expected a '{}'",
 
                expr_type.display_name(&ctx.heap), 
 
                InferenceType::partial_display_name(&ctx.heap, template)
 
            )
 
        )
 
    }
 

	
 
    /// Constructs a human interpretable error in the case that type inference
 
    /// on a polymorphic variable of a function call failed. This may only be
 
    /// caused by a pair of inference types (which may come from arguments or
 
    /// the return type) having two different inferred values for that
 
    /// polymorphic variable.
 
    ///
 
    /// So we find this pair (which may be an argument type or return type
 
    /// conflicting with itself) and construct the error using it.
 
    fn construct_poly_arg_error(
 
        &self, ctx: &Ctx, call_id: CallExpressionId
 
    ) -> ParseError2 {
 
        // Helper function to check for polymorph mismatch between two inference
 
        // types.
 
        fn has_poly_mismatch<'a>(type_a: &'a InferenceType, type_b: &'a InferenceType) -> Option<(usize, &'a [InferenceTypePart], &'a [InferenceTypePart])> {
 
            if !type_a.has_body_marker || !type_b.has_body_marker {
 
                return None
 
            }
 

	
 
            for (marker_a, section_a) in type_a.body_marker_iter() {
 
                for (marker_b, section_b) in type_b.body_marker_iter() {
 
                    if marker_a != marker_b {
 
                        // Not the same polymorphic variable
 
                        continue;
 
                    }
 

	
 
                    if !InferenceType::check_subtrees(section_a, 0, section_b, 0) {
 
                        // Not compatible
 
                        return Some((marker_a, section_a, section_b))
 
                    }
 
                }
 
            }
 

	
 
            None
 
        }
 

	
 
        // Helper function to retrieve the polymorphic variable name and the function name
 
        fn get_poly_var_and_func_name(ctx: &Ctx, poly_var_idx: usize, expr: &CallExpression) -> (String, String) {
 
            match &expr.method {
 
                Method::Create => unreachable!(),
 
                Method::Fires => (String::from('T'), String::from("fires")),
 
                Method::Get => (String::from('T'), String::from("get")),
 
                Method::Put => (String::from('T'), String::from("put")),
 
                Method::Symbolic(symbolic) => {
 
                    let definition = &ctx.heap[symbolic.definition.unwrap()];
 
                    let poly_var = match definition {
 
                        Definition::Struct(_) | Definition::Enum(_) => unreachable!(),
 
                        Definition::Function(definition) => {
 
                            String::from_utf8_lossy(&definition.poly_vars[poly_var_idx].value).to_string()
 
                        },
 
                        Definition::Component(definition) => {
 
                            String::from_utf8_lossy(&definition.poly_vars[poly_var_idx].value).to_string()
 
                        }
 
                    };
 
                    let func_name = String::from_utf8_lossy(&symbolic.identifier.value).to_string();
 
                    (poly_var, func_name)
 
                }
 
            }
 
        }
 

	
 
        // Helper function to construct initial error
 
        fn construct_main_error(ctx: &Ctx, poly_var_idx: usize, expr: &CallExpression) -> ParseError2 {
 
            let (poly_var, func_name) = get_poly_var_and_func_name(ctx, poly_var_idx, expr);
 
            return ParseError2::new_error(
 
                &ctx.module.source, expr.position(),
 
                &format!(
 
                    "Conflicting type for polymorphic variable '{}' of '{}'",
 
                    poly_var, func_name
 
                )
 
            )
 
        }
 

	
 
        // Actual checking
 
        let extra = self.extra_data.get(&call_id.upcast()).unwrap();
 
        let expr = &ctx.heap[call_id];
 

	
 
        // - check return type with itself
 
        if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&extra.returned, &extra.returned) {
 
            return construct_main_error(ctx, poly_idx, expr)
 
                .with_postfixed_info(
 
                    &ctx.module.source, expr.position(),
 
                    &format!(
 
                        "The return type inferred the conflicting types '{}' and '{}'",
 
                        InferenceType::partial_display_name(&ctx.heap, section_a),
 
                        InferenceType::partial_display_name(&ctx.heap, section_b)
 
                    )
 
                )
 
        }
 

	
 
        // - check arguments with each other argument and with return type
 
        for (arg_a_idx, arg_a) in extra.embedded.iter().enumerate() {
 
            for (arg_b_idx, arg_b) in extra.embedded.iter().enumerate() {
 
                if arg_b_idx > arg_a_idx {
 
                    break;
 
                }
 

	
 
                if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&arg_a, &arg_b) {
 
                    let error = construct_main_error(ctx, poly_idx, expr);
 
                    if arg_a_idx == arg_b_idx {
 
                        // Same argument
 
                        let arg = &ctx.heap[expr.arguments[arg_a_idx]];
 
                        return error.with_postfixed_info(
 
                            &ctx.module.source, arg.position(),
 
                            &format!(
 
                                "This argument inferred the conflicting types '{}' and '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a),
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        )
 
                    } else {
 
                        let arg_a = &ctx.heap[expr.arguments[arg_a_idx]];
 
                        let arg_b = &ctx.heap[expr.arguments[arg_b_idx]];
 
                        return error.with_postfixed_info(
 
                            &ctx.module.source, arg_a.position(),
 
                            &format!(
 
                                "This argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a)
 
                            )
 
                        ).with_postfixed_info(
 
                            &ctx.module.source, arg_b.position(),
 
                            &format!(
 
                                "While this argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        )
 
                    }
 
                }
 
            }
 

	
 
            // Check with return type
 
            if let Some((poly_idx, section_arg, section_ret)) = has_poly_mismatch(arg_a, &extra.returned) {
 
                let arg = &ctx.heap[expr.arguments[arg_a_idx]];
 
                return construct_main_error(ctx, poly_idx, expr)
 
                    .with_postfixed_info(
 
                        &ctx.module.source, arg.position(),
 
                        &format!(
 
                            "This argument inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, section_arg)
 
                        )
 
                    )
 
                    .with_postfixed_info(
 
                        &ctx.module.source, expr.position,
 
                        &format!(
 
                            "While the return type inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, section_ret)
 
                        )
 
                    )
 
            }
 
        }
 

	
 
        unreachable!("construct_poly_arg_error without actual error found?")
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 
    use crate::protocol::arena::Id;
 
    use InferenceTypePart as ITP;
 
    use InferenceType as IT;
 

	
 
    #[test]
 
    fn test_single_part_inference() {
 
        // lhs argument inferred from rhs
 
        let pairs = [
 
            (ITP::NumberLike, ITP::Byte),
 
            (ITP::IntegerLike, ITP::Int),
 
            (ITP::Unknown, ITP::Message),
 
            (ITP::Unknown, ITP::Long),
 
            (ITP::Unknown, ITP::String)
 
        ];
 
        for (lhs, rhs) in pairs.iter() {
 
            // Using infer-both
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let mut rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            // Using infer-single
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 
        }
 
    }
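
    // Minimal extra sketch, assuming the same InferenceType API as the tests
    // above: forcing a fully specified template onto an unknown type, which is
    // what apply_forced_constraint does with its template argument.
    #[test]
    fn test_template_forcing_sketch() {
        let mut expr_type = IT::new(false, false, vec![ITP::Unknown]);
        let template = vec![ITP::Bool];
        let result = IT::infer_subtree_for_single_type(&mut expr_type, 0, &template, 0);
        assert_eq!(SingleInferenceResult::Modified, result);
        assert_eq!(expr_type.parts, template);
    }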
 

	
 
    #[test]
 
    fn test_multi_part_inference() {
 
        let pairs = [
 
            (vec![ITP::ArrayLike, ITP::NumberLike], vec![ITP::Slice, ITP::Byte]),
 
            (vec![ITP::Unknown], vec![ITP::Input, ITP::Array, ITP::String]),
 
            (vec![ITP::PortLike, ITP::Int], vec![ITP::Input, ITP::Int]),
 
            (vec![ITP::Unknown], vec![ITP::Output, ITP::Int]),
 
            (
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Unknown, ITP::Output, ITP::Unknown],
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Array, ITP::Int, ITP::Output, ITP::Int]
 
            )
 
        ];
 

	
 
        for (lhs, rhs) in pairs.iter() {
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let mut rhs_type = IT::new(false, false, rhs.clone());
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let rhs_type = IT::new(false, false, rhs.clone());
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts)
 
        }
 
    }
 
}
 
\ No newline at end of file
src/protocol/parser/type_table.rs
Show inline comments
 
/**
 
TypeTable
 

	
 
Contains the type table: a datastructure that, when compilation succeeds,
 
contains a concrete type definition for each AST type definition. In general
 
terms the type table will go through the following phases during the compilation
 
process:
 

	
 
    1. The base type definitions are resolved after the parser phase has
 
        finished. This implies that the AST is fully constructed, but not yet
 
        annotated.
 
    2. With the base type definitions resolved, the validation/linker phase will
 
        use the type table (together with the symbol table) to disambiguate
 
        terms (e.g. does an expression refer to a variable, an enum, a constant,
 
        etc.)
 
    3. During the type checking/inference phase the type table is used to ensure
 
        that the AST contains valid use of types in expressions and statements.
 
        At the same time type inference will find concrete instantiations of
 
        polymorphic types, these will be stored in the type table as monomorphed
 
        instantiations of a generic type.
 
    4. After type checking and inference (and possibly when constructing byte
 
        code) the type table will construct a type graph and solidify each
 
        non-polymorphic type and monomorphed instantiations of polymorphic types
 
        into concrete types.
 

	
 
So a base type is defined by its (optionally polymorphic) representation in the
 
AST. A concrete type has concrete types for each of the polymorphic arguments. A
 
struct, enum or union may have polymorphic arguments but not actually be a
 
polymorphic type. This happens when the polymorphic arguments are not used in
 
the type definition itself. Similarly for functions/components, except that here we just
 
check the arguments/return type of the signature.
 

	
 
Apart from base types and concrete types, we also use the term "embedded type"
 
for types that are embedded within another type, such as the type of a struct
field or of a union variant. Embedded types may themselves have
 
polymorphic arguments and therefore form an embedded type tree.
 

	
 
NOTE: for now a polymorphic definition of a function/component is illegal if the
 
    polymorphic arguments are not used in the arguments/return type. It should
 
    be legal, but we disallow it for now.
 

	
 
TODO: Allow potentially cyclic datatypes and reject truly cyclic datatypes.
 
TODO: Allow for the full potential of polymorphism
 
TODO: Detect "true" polymorphism: for datatypes like structs/enum/unions this
 
    is simple. For functions we need to check the entire body. Do it here? Or
 
    do it somewhere else?
 
TODO: Do we want to check fn argument collision here, or in validation phase?
 
TODO: Make type table an on-demand thing instead of constructing all base types.
 
TODO: Cleanup everything, feels like a lot can be written cleaner and with less
 
    assumptions on each function call.
 
// TODO: Review all comments
 
*/
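
// For example (illustrative): a polymorphic struct definition such as
// Pair<T1, T2> gets a single base DefinedType entry in the table, and each
// concrete instantiation encountered during type inference (say T1 = int and
// T2 = bool) is recorded on that entry as one monomorph, i.e. a
// Vec<ConcreteType> holding the two concrete polymorphic arguments.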
 

	
 
use std::fmt::{Formatter, Result as FmtResult};
 
use std::collections::{HashMap, VecDeque};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::parser::symbol_table::{SymbolTable, Symbol};
 
use crate::protocol::inputsource::*;
 
use crate::protocol::parser::*;
 

	
 
//------------------------------------------------------------------------------
 
// Defined Types
 
//------------------------------------------------------------------------------
 

	
 
#[derive(Copy, Clone, PartialEq, Eq)]
 
pub enum TypeClass {
 
    Enum,
 
    Union,
 
    Struct,
 
    Function,
 
    Component
 
}
 

	
 
impl TypeClass {
 
    pub(crate) fn display_name(&self) -> &'static str {
 
        match self {
 
            TypeClass::Enum => "enum",
 
            TypeClass::Union => "union",
 
            TypeClass::Struct => "struct",
 
            TypeClass::Function => "function",
 
            TypeClass::Component => "component",
 
        }
 
    }
 

	
 
    pub(crate) fn is_data_type(&self) -> bool {
 
        *self == TypeClass::Enum || *self == TypeClass::Union || *self == TypeClass::Struct
 
    }
 

	
 
    pub(crate) fn is_proc_type(&self) -> bool {
 
        *self == TypeClass::Function || *self == TypeClass::Component
 
    }
 
}
 

	
 
impl std::fmt::Display for TypeClass {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        write!(f, "{}", self.display_name())
 
    }
 
}
 

	
 
/// Struct wrapping around a potentially polymorphic type. If the type does not
 
/// have any polymorphic arguments then it will not have any monomorphs and
 
/// `is_polymorph` will be set to `false`. A type with polymorphic arguments
 
/// only has `is_polymorph` set to `true` if the polymorphic arguments actually
 
/// appear in the types associated types (function return argument, struct
 
/// field, enum variant, etc.). Otherwise the polymorphic argument is just a
 
/// marker and does not influence the bytesize of the type.
 
pub struct DefinedType {
 
    pub(crate) ast_root: RootId,
 
    pub(crate) ast_definition: DefinitionId,
 
    pub(crate) definition: DefinedTypeVariant,
 
    pub(crate) poly_args: Vec<PolyArg>,
 
    pub(crate) is_polymorph: bool,
 
    pub(crate) is_pointerlike: bool,
 
    // TODO: @optimize
 
    pub(crate) monomorphs: Vec<Vec<ConcreteType>>,
 
}
 

	
 
impl DefinedType {
 
    fn add_monomorph(&mut self, types: Vec<ConcreteType>) {
 
        debug_assert!(!self.has_monomorph(&types), "monomorph already exists");
 
        self.monomorphs.push(types);
 
    }
 

	
 
    fn has_monomorph(&self, types: &Vec<ConcreteType>) -> bool {
 
        debug_assert_eq!(self.poly_args.len(), types.len(), "mismatch in number of polymorphic types");
 
        for monomorph in &self.monomorphs {
 
            if monomorph == types { return true; }
 
        }
 

	
 
        return false;
 
    }
 
}
 

	
 
pub enum DefinedTypeVariant {
 
    Enum(EnumType),
 
    Union(UnionType),
 
    Struct(StructType),
 
    Function(FunctionType),
 
    Component(ComponentType)
 
}
 

	
 
pub struct PolyArg {
 
    identifier: Identifier,
 
    /// Whether the polymorphic argument is used directly in the definition of
 
    /// the type (not including bodies of function/component types)
 
    is_in_use: bool,
 
}
 

	
 
impl DefinedTypeVariant {
 
    pub(crate) fn type_class(&self) -> TypeClass {
 
        match self {
 
            DefinedTypeVariant::Enum(_) => TypeClass::Enum,
 
            DefinedTypeVariant::Union(_) => TypeClass::Union,
 
            DefinedTypeVariant::Struct(_) => TypeClass::Struct,
 
            DefinedTypeVariant::Function(_) => TypeClass::Function,
 
            DefinedTypeVariant::Component(_) => TypeClass::Component
 
        }
 
    }
 
}
 

	
 
/// `EnumType` is the classical C/C++ enum type. It has various variants with
 
/// an assigned integer value. The integer values may be user-defined,
 
/// compiler-defined, or a mix of the two. If a user assigns the same enum
 
/// value multiple times, we assume the user is an expert and we consider both
 
/// variants to be equal to one another.
 
pub struct EnumType {
 
    variants: Vec<EnumVariant>,
 
    representation: PrimitiveType,
 
}
 

	
 
// TODO: Also support maximum u64 value
 
pub struct EnumVariant {
 
    identifier: Identifier,
 
    value: i64,
 
}
 

	
 
/// `UnionType` is the algebraic datatype (or sum type, or discriminated union).
 
/// A value is an element of the union, identified by its tag, and may contain
 
/// a single subtype.
 
pub struct UnionType {
 
    variants: Vec<UnionVariant>,
 
    tag_representation: PrimitiveType
 
}
 

	
 
pub struct UnionVariant {
 
    identifier: Identifier,
 
    parser_type: Option<ParserTypeId>,
 
    tag_value: i64,
 
}
 

	
 
pub struct StructType {
 
    fields: Vec<StructField>,
 
}
 

	
 
pub struct StructField {
 
    identifier: Identifier,
 
    parser_type: ParserTypeId,
 
}
 

	
 
pub struct FunctionType {
 
    pub return_type: ParserTypeId,
 
    pub arguments: Vec<FunctionArgument>
 
}
 

	
 
pub struct ComponentType {
 
    pub variant: ComponentVariant,
 
    pub arguments: Vec<FunctionArgument>
 
}
 

	
 
pub struct FunctionArgument {
 
    identifier: Identifier,
 
    parser_type: ParserTypeId,
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
// TODO: @cleanup Do I really need this, doesn't make the code that much cleaner
 
struct TypeIterator {
 
    breadcrumbs: Vec<(RootId, DefinitionId)>
 
}
 

	
 
impl TypeIterator {
 
    fn new() -> Self {
 
        Self{ breadcrumbs: Vec::with_capacity(32) }
 
    }
 

	
 
    fn reset(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.clear();
 
        self.breadcrumbs.push((root_id, definition_id))
 
    }
 

	
 
    fn push(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.push((root_id, definition_id));
 
    }
 

	
 
    fn contains(&self, root_id: RootId, definition_id: DefinitionId) -> bool {
 
        for (stored_root_id, stored_definition_id) in self.breadcrumbs.iter() {
 
            if *stored_root_id == root_id && *stored_definition_id == definition_id { return true; }
 
        }
 

	
 
        return false
 
    }
 

	
 
    fn top(&self) -> Option<(RootId, DefinitionId)> {
 
        self.breadcrumbs.last().map(|(r, d)| (*r, *d))
 
    }
 

	
 
    fn pop(&mut self) {
 
        debug_assert!(!self.breadcrumbs.is_empty());
 
        self.breadcrumbs.pop();
 
    }
 
}
 

	
 
/// Result from attempting to resolve a `ParserType` using the symbol table and
 
/// the type table.
 
enum ResolveResult {
 
    /// ParserType is a builtin type
 
    BuiltIn,
 
    /// ParserType points to a polymorphic argument, contains the index of the
 
    /// polymorphic argument in the outermost definition (e.g. we may have 
 
    /// structs nested three levels deep, but in the innermost struct we can 
 
    /// only use the polyargs that are specified in the type definition of the
 
    /// outermost struct).
 
    PolyArg(usize),
 
    /// ParserType points to a user-defined type that is already resolved in the
 
    /// type table.
 
    Resolved((RootId, DefinitionId)),
 
    /// ParserType points to a user-defined type that is not yet resolved into
 
    /// the type table.
 
    Unresolved((RootId, DefinitionId))
 
}
 

	
 
pub(crate) struct TypeTable {
 
    /// Lookup from AST DefinitionId to a defined type. Considering possible
 
    /// polymorphs is done inside the `DefinedType` struct.
 
    lookup: HashMap<DefinitionId, DefinedType>,
 
    /// Iterator over `(module, definition)` tuples used as workspace to make sure
 
    /// that the base definitions of all of a type's subtypes are resolved.
 
    iter: TypeIterator,
 
    /// Iterator over `parser type`s during the process where `parser types` are
 
    /// resolved into a `(module, definition)` tuple.
 
    parser_type_iter: VecDeque<ParserTypeId>,
 
}
 

	
 
pub(crate) struct TypeCtx<'a> {
 
    symbols: &'a SymbolTable,
 
    heap: &'a mut Heap,
 
    modules: &'a [LexedModule]
 
}
 

	
 
impl<'a> TypeCtx<'a> {
 
    pub(crate) fn new(symbols: &'a SymbolTable, heap: &'a mut Heap, modules: &'a [LexedModule]) -> Self {
 
        Self{ symbols, heap, modules }
 
    }
 
}
 

	
 
impl TypeTable {
 
    /// Construct a new type table without any resolved types. Types will be
 
    /// resolved on-demand.
 
    pub(crate) fn new(ctx: &mut TypeCtx) -> Result<Self, ParseError2> {
 
        // Make sure we're allowed to cast root_id to index into ctx.modules
 
        if cfg!(debug_assertions) {
 
            for (index, module) in ctx.modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        }
 

	
 
        // Use context to guess hashmap size
 
        let reserve_size = ctx.heap.definitions.len();
 
        let mut table = Self{
 
            lookup: HashMap::with_capacity(reserve_size),
 
            iter: TypeIterator::new(),
 
            parser_type_iter: VecDeque::with_capacity(64),
 
        };
 

	
 
        // TODO: @cleanup Rework this hack
 
        for root_idx in 0..ctx.modules.len() {
 
            let last_definition_idx = ctx.heap[ctx.modules[root_idx].root_id].definitions.len();
 
            for definition_idx in 0..last_definition_idx {
 
                let definition_id = ctx.heap[ctx.modules[root_idx].root_id].definitions[definition_idx];
 
                table.resolve_base_definition(ctx, definition_id)?;
 
            }
 
        }
 

	
 
        debug_assert_eq!(table.lookup.len(), reserve_size, "mismatch in reserved size of type table");
 

	
 
        Ok(table)
 
    }
 

	
 
    /// Retrieves base definition from type table. We must be able to retrieve
 
    /// it as we resolve all base types upon type table construction (for now).
 
    /// However, in the future we might do on-demand type resolving, so return
 
    /// an option anyway
 
    pub(crate) fn get_base_definition(&self, definition_id: &DefinitionId) -> Option<&DefinedType> {
 
        self.lookup.get(&definition_id)
 
    }
 

	
 
    /// Instantiates a monomorph for a given base definition.
 
    pub(crate) fn add_monomorph(&mut self, definition_id: &DefinitionId, types: Vec<ConcreteType>) {
 
        debug_assert!(
 
            self.lookup.contains_key(definition_id),
 
            "attempting to instantiate monomorph of definition unknown to type table"
 
        );
 

	
 
        let definition = self.lookup.get_mut(definition_id).unwrap();
 
        definition.add_monomorph(types);
 
    }
 

	
 
    /// Checks if a given definition already has a specific monomorph
 
    pub(crate) fn has_monomorph(&mut self, definition_id: &DefinitionId, types: &Vec<ConcreteType>) -> bool {
 
        debug_assert!(
 
            self.lookup.contains_key(definition_id),
 
            "attempting to check monomorph existence of definition unknown to type table"
 
        );
 

	
 
        let definition = self.lookup.get(definition_id).unwrap();
 
        definition.has_monomorph(types)
 
    }
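
    // Illustrative (assumed) usage by the type inferencer: check before adding,
    // since add_monomorph asserts that the monomorph does not exist yet:
    //
    //     if !type_table.has_monomorph(&definition_id, &concrete_types) {
    //         type_table.add_monomorph(&definition_id, concrete_types);
    //     }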
 

	
 
    /// This function will resolve just the basic definition of the type; it
 
    /// will not handle any of the monomorphized instances of the type.
 
    fn resolve_base_definition<'a>(&'a mut self, ctx: &mut TypeCtx, definition_id: DefinitionId) -> Result<(), ParseError2> {
 
        // Check if we have already resolved the base definition
 
        if self.lookup.contains_key(&definition_id) { return Ok(()); }
 

	
 
        let root_id = Self::find_root_id(ctx, definition_id);
 
        self.iter.reset(root_id, definition_id);
 

	
 
        while let Some((root_id, definition_id)) = self.iter.top() {
 
            // We have a type to resolve
 
            let definition = &ctx.heap[definition_id];
 

	
 
            let can_pop_breadcrumb = match definition {
 
                // TODO: @cleanup Borrow rules hax
 
                Definition::Enum(_) => self.resolve_base_enum_definition(ctx, root_id, definition_id),
 
                Definition::Struct(_) => self.resolve_base_struct_definition(ctx, root_id, definition_id),
 
                Definition::Component(_) => self.resolve_base_component_definition(ctx, root_id, definition_id),
 
                Definition::Function(_) => self.resolve_base_function_definition(ctx, root_id, definition_id),
 
            }?;
 

	
 
            // Otherwise: `ingest_resolve_result` has pushed a new breadcrumb
 
            // that we must follow before we can resolve the current type
 
            if can_pop_breadcrumb {
 
                self.iter.pop();
 
            }
 
        }
 

	
 
        // We must have resolved the type
 
        debug_assert!(self.lookup.contains_key(&definition_id), "base type not resolved");
 
        Ok(())
 
    }
 

	
 
    /// Resolve the basic enum definition to an entry in the type table. It will
 
    /// not instantiate any monomorphized instances of polymorphic enum
 
    /// definitions. If a subtype has to be resolved first then this function
 
    /// will return `false` after calling `ingest_resolve_result`.
 
    fn resolve_base_enum_definition(&mut self, ctx: &mut TypeCtx, root_id: RootId, definition_id: DefinitionId) -> Result<bool, ParseError2> {
 
        debug_assert!(ctx.heap[definition_id].is_enum());
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base enum already resolved");
 
        
 
        let definition = ctx.heap[definition_id].as_enum();
 

	
 
        // Check if the enum should be implemented as a classic enumeration or
 
        // a tagged union. Keep track of variant index for error messages. Make
 
        // sure all embedded types are resolved.
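        // For example (illustrative): a declaration whose variants are all
        // plain or `Integer(..)` values stays a classic enum, while a single
        // `Type(..)` variant forces the tagged-union representation below.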
 
        let mut first_tag_value = None;
 
        let mut first_int_value = None;
 
        for variant in &definition.variants {
 
            match &variant.value {
 
                EnumVariantValue::None => {},
 
                EnumVariantValue::Integer(_) => if first_int_value.is_none() {
 
                    first_int_value = Some(variant.position);
 
                },
 
                EnumVariantValue::Type(variant_type_id) => {
 
                    if first_tag_value.is_none() {
 
                        first_tag_value = Some(variant.position);
 
                    }
 

	
 
                    // Check if the embedded type needs to be resolved
 
                    let resolve_result = self.resolve_base_parser_type(ctx, &definition.poly_vars, root_id, *variant_type_id)?;
 
                    if !self.ingest_resolve_result(ctx, resolve_result)? {
 
                        return Ok(false)
 
                    }
 
                }
 
            }
 
        }
 

	
 
        if first_tag_value.is_some() && first_int_value.is_some() {
 
            // Mixing the two is useless and probably a programmer mistake, so
            // reject it with a descriptive error.
 
            let module_source = &ctx.modules[root_id.index as usize].source;
 
            let tag_pos = first_tag_value.unwrap();
 
            let int_pos = first_int_value.unwrap();
 
            return Err(
 
                ParseError2::new_error(
 
                    module_source, definition.position,
 
                    "Illegal combination of enum integer variant(s) and enum union variant(s)"
 
                )
 
                    .with_postfixed_info(module_source, int_pos, "Assigning an integer value here")
 
                    .with_postfixed_info(module_source, tag_pos, "Embedding a type in a union variant here")
 
            );
 
        }
 

	
 
        // Enumeration is legal
 
        if first_tag_value.is_some() {
 
            // Implement as a tagged union
 

	
 
            // Determine the union variants
 
            let mut tag_value = -1;
 
            let mut variants = Vec::with_capacity(definition.variants.len());
 
            for variant in &definition.variants {
 
                tag_value += 1;
 
                let parser_type = match &variant.value {
 
                    EnumVariantValue::None => {
 
                        None
 
                    },
 
                    EnumVariantValue::Type(parser_type_id) => {
 
                        // Type should be resolvable, we checked this above
 
                        Some(*parser_type_id)
 
                    },
 
                    EnumVariantValue::Integer(_) => {
 
                        debug_assert!(false, "Encountered `Integer` variant after asserting enum is a discriminated union");
 
                        unreachable!();
 
                    }
 
                };
 

	
 
                variants.push(UnionVariant{
 
                    identifier: variant.identifier.clone(),
 
                    parser_type,
 
                    tag_value,
 
                })
 
            }
 

	
 
            // Ensure union names and polymorphic args do not conflict
 
            self.check_identifier_collision(
 
                ctx, root_id, &variants, |variant| &variant.identifier, "enum variant"
 
            )?;
 
            self.check_poly_args_collision(ctx, root_id, &definition.poly_vars)?;
 

	
 
            let mut poly_args = self.create_initial_poly_args(&definition.poly_vars);
 
            for variant in &variants {
 
                if let Some(embedded) = variant.parser_type {
 
                    self.check_and_resolve_embedded_type_and_modify_poly_args(ctx, definition_id, &mut poly_args, root_id, embedded)?;
 
                }
 
            }
 
            let is_polymorph = poly_args.iter().any(|arg| arg.is_in_use);
 

	
 
            // Insert base definition in type table
 
            self.lookup.insert(definition_id, DefinedType {
 
                ast_root: root_id,
 
                ast_definition: definition_id,
 
                definition: DefinedTypeVariant::Union(UnionType{
 
                    variants,
 
                    tag_representation: Self::enum_tag_type(-1, tag_value),
 
                }),
 
                poly_args,
 
                is_polymorph,
 
                is_pointerlike: false, // TODO: @cyclic_types
 
                monomorphs: Vec::new()
 
            });
 
        } else {
 
            // Implement as a regular enum
 
            let mut enum_value = -1;
 
            let mut min_enum_value = 0;
 
            let mut max_enum_value = 0;
 
            let mut variants = Vec::with_capacity(definition.variants.len());
 
            for variant in &definition.variants {
 
                enum_value += 1;
 
                match &variant.value {
 
                    EnumVariantValue::None => {
 
                        variants.push(EnumVariant{
 
                            identifier: variant.identifier.clone(),
 
                            value: enum_value,
 
                        });
 
                    },
 
                    EnumVariantValue::Integer(override_value) => {
 
                        enum_value = *override_value;
 
                        variants.push(EnumVariant{
 
                            identifier: variant.identifier.clone(),
 
                            value: enum_value,
 
                        });
 
                    },
 
                    EnumVariantValue::Type(_) => {
 
                        debug_assert!(false, "Encountered `Type` variant after asserting enum is not a discriminated union");
 
                        unreachable!();
 
                    }
 
                }
 
                if enum_value < min_enum_value { min_enum_value = enum_value; }
 
                else if enum_value > max_enum_value { max_enum_value = enum_value; }
 
            }
 

	
 
            // Ensure enum names and polymorphic args do not conflict
 
            self.check_identifier_collision(
 
                ctx, root_id, &variants, |variant| &variant.identifier, "enum variant"
 
            )?;
 
            self.check_poly_args_collision(ctx, root_id, &definition.poly_vars)?;
 

	
 
            // Note: although the variants cannot have embedded types that
            // depend on the polymorphic variables, those variables might still
            // be present as tokens in the source.
 
            let definition_id = definition.this.upcast();
 
            self.lookup.insert(definition_id, DefinedType {
 
                ast_root: root_id,
 
                ast_definition: definition_id,
 
                definition: DefinedTypeVariant::Enum(EnumType{
 
                    variants,
 
                    representation: Self::enum_tag_type(min_enum_value, max_enum_value)
 
                }),
 
                poly_args: self.create_initial_poly_args(&definition.poly_vars),
 
                is_polymorph: false,
 
                is_pointerlike: false,
src/protocol/parser/visitor_linker.rs
Show inline comments
 
@@ -303,1235 +303,1209 @@ impl Visitor2 for ValidityAndLinkerVisitor {
 
                    start_while: id,
 
                    position,
 
                    next: None,
 
                }
 
            });
 
            let stmt = &mut ctx.heap[id];
 
            stmt.end_while = Some(end_while_id);
 
            stmt.in_sync = self.in_sync.clone();
 

	
 
            self.insert_buffer.push((self.relative_pos_in_block + 1, end_while_id.upcast()));
 
        } else {
 
            let (test_id, body_id) = {
 
                let stmt = &ctx.heap[id];
 
                (stmt.test, stmt.body)
 
            };
 
            let old_while = self.in_while.replace(id);
 
            debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
            self.expr_parent = ExpressionParent::While(id);
 
            self.visit_expr(ctx, test_id)?;
 
            self.expr_parent = ExpressionParent::None;
 

	
 
            self.visit_stmt(ctx, body_id)?;
 
            self.in_while = old_while;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_break_stmt(&mut self, ctx: &mut Ctx, id: BreakStatementId) -> VisitorResult {
 
        if self.performing_breadth_pass {
 
            // Break statements with a label can already be resolved in the
            // breadth pass; there is no need to wait until all labels have
            // been resolved.
 
            let target_end_while = {
 
                let stmt = &ctx.heap[id];
 
                let target_while_id = self.resolve_break_or_continue_target(ctx, stmt.position, &stmt.label)?;
 
                let target_while = &ctx.heap[target_while_id];
 
                debug_assert!(target_while.end_while.is_some());
 
                target_while.end_while.unwrap()
 
            };
 

	
 
            let stmt = &mut ctx.heap[id];
 
            stmt.target = Some(target_end_while);
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_continue_stmt(&mut self, ctx: &mut Ctx, id: ContinueStatementId) -> VisitorResult {
 
        if self.performing_breadth_pass {
 
            let target_while_id = {
 
                let stmt = &ctx.heap[id];
 
                self.resolve_break_or_continue_target(ctx, stmt.position, &stmt.label)?
 
            };
 

	
 
            let stmt = &mut ctx.heap[id];
 
            stmt.target = Some(target_while_id)
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        if self.performing_breadth_pass {
 
            // Check for validity of synchronous statement
 
            let cur_sync_position = ctx.heap[id].position;
 
            if self.in_sync.is_some() {
 
                // Nested synchronous statement
 
                let old_sync = &ctx.heap[self.in_sync.unwrap()];
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, cur_sync_position, "Illegal nested synchronous statement")
 
                        .with_postfixed_info(&ctx.module.source, old_sync.position, "It is nested in this synchronous statement")
 
                );
 
            }
 

	
 
            if !self.def_type.is_primitive() {
 
                return Err(ParseError2::new_error(
 
                    &ctx.module.source, cur_sync_position,
 
                    "Synchronous statements may only be used in primitive components"
 
                ));
 
            }
 

	
 
            // Append SynchronousEnd pseudo-statement
 
            let sync_end_id = ctx.heap.alloc_end_synchronous_statement(|this| EndSynchronousStatement{
 
                this,
 
                position: cur_sync_position,
 
                start_sync: id,
 
                next: None,
 
            });
 
            let sync_start = &mut ctx.heap[id];
 
            sync_start.end_sync = Some(sync_end_id);
 
            self.insert_buffer.push((self.relative_pos_in_block + 1, sync_end_id.upcast()));
 
        } else {
 
            let sync_body = ctx.heap[id].body;
 
            let old = self.in_sync.replace(id);
 
            self.visit_stmt_with_hint(ctx, sync_body, Some(id))?;
 
            self.in_sync = old;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        if self.performing_breadth_pass {
 
            let stmt = &ctx.heap[id];
 
            if !self.def_type.is_function() {
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, stmt.position, "Return statements may only appear in function bodies")
 
                );
 
            }
 
        } else {
 
            // If here then we are within a function
 
            debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
            self.expr_parent = ExpressionParent::Return(id);
 
            self.visit_expr(ctx, ctx.heap[id].expression)?;
 
            self.expr_parent = ExpressionParent::None;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_assert_stmt(&mut self, ctx: &mut Ctx, id: AssertStatementId) -> VisitorResult {
 
        let stmt = &ctx.heap[id];
 
        if self.performing_breadth_pass {
 
            if self.def_type.is_function() {
 
                // TODO: We probably want to allow this. Mark the function as
 
                //  using asserts, and then only allow calls to these functions
 
                //  within components. Such a marker will cascade through any
 
                //  functions that then call an asserting function
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, stmt.position, "Illegal assert statement in a function")
 
                );
 
            }
 

	
 
            // We are in a component of some sort, but we also need to be within a
 
            // synchronous statement
 
            if self.in_sync.is_none() {
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, stmt.position, "Illegal assert statement outside of a synchronous block")
 
                );
 
            }
 
        } else {
 
            debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
            let expr_id = stmt.expression;
 

	
 
            self.expr_parent = ExpressionParent::Assert(id);
 
            self.visit_expr(ctx, expr_id)?;
 
            self.expr_parent = ExpressionParent::None;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_goto_stmt(&mut self, ctx: &mut Ctx, id: GotoStatementId) -> VisitorResult {
 
        if !self.performing_breadth_pass {
 
            // Must perform goto label resolving after the breadth pass, this
 
            // way we are able to find all the labels in current and outer
 
            // scopes.
 
            let target_id = self.find_label(ctx, &ctx.heap[id].label)?;
 
            ctx.heap[id].target = Some(target_id);
 

	
 
            let target = &ctx.heap[target_id];
 
            if self.in_sync != target.in_sync {
 
                // We can only goto the current scope or outer scopes. Because
                // nested sync statements are not allowed, a mismatch implies
                // that the goto is inside a sync scope while its target is not.
 
                debug_assert!(self.in_sync.is_some());
 
                let goto_stmt = &ctx.heap[id];
 
                let sync_stmt = &ctx.heap[self.in_sync.unwrap()];
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, goto_stmt.position, "Goto may not escape the surrounding synchronous block")
 
                        .with_postfixed_info(&ctx.module.source, target.position, "This is the target of the goto statement")
 
                        .with_postfixed_info(&ctx.module.source, sync_stmt.position, "Which will jump past this statement")
 
                );
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        // Link the call expression following the new statement
 
        if self.performing_breadth_pass {
 
            // TODO: Cleanup error messages, can be done cleaner
 
            // Make sure new statement occurs within a composite component
 
            let call_expr_id = ctx.heap[id].expression;
 
            if !self.def_type.is_composite() {
 
                let new_stmt = &ctx.heap[id];
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, new_stmt.position, "Instantiating components may only be done in composite components")
 
                );
 
            }
 

	
 
 
            // We make sure that we point to a symbolic method. Checking that it
 
            // points to a component is done in the depth pass.
 
            let call_expr = &ctx.heap[call_expr_id];
 
            if let Method::Symbolic(_) = &call_expr.method {
 
                // We're fine
 
            } else {
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, call_expr.position, "Must instantiate a component")
 
                );
 
            }
 
        } else {
 
            // Just call `visit_call_expr`. We do some extra work we don't have
            // to, but this prevents silly mistakes.
            let call_expr_id = ctx.heap[id].expression;
 
            debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
            self.expr_parent = ExpressionParent::New(id);
 
            self.visit_call_expr(ctx, call_expr_id)?;
 
            self.expr_parent = ExpressionParent::None;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        if !self.performing_breadth_pass {
 
            let expr_id = ctx.heap[id].expression;
 

	
 
            debug_assert_eq!(self.expr_parent, ExpressionParent::None);
 
            self.expr_parent = ExpressionParent::ExpressionStmt(id);
 
            self.visit_expr(ctx, expr_id)?;
 
            self.expr_parent = ExpressionParent::None;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 

	
 
    //--------------------------------------------------------------------------
 
    // Expression visitors
 
    //--------------------------------------------------------------------------
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let upcast_id = id.upcast();
 
        let assignment_expr = &mut ctx.heap[id];
 

	
 
        let left_expr_id = assignment_expr.left;
 
        let right_expr_id = assignment_expr.right;
 
        let old_expr_parent = self.expr_parent;
 
        assignment_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.visit_expr(ctx, right_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 
        Ok(())
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 
        let upcast_id = id.upcast();
 
        let conditional_expr = &mut ctx.heap[id];
 

	
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        conditional_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.visit_expr(ctx, true_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 2);
 
        self.visit_expr(ctx, false_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 
        let upcast_id = id.upcast();
 
        let binary_expr = &mut ctx.heap[id];
 
        let left_expr_id = binary_expr.left;
 
        let right_expr_id = binary_expr.right;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        binary_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.visit_expr(ctx, right_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let unary_expr = &mut ctx.heap[id];
 
        let expr_id = unary_expr.expression;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        unary_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(id.upcast(), 0);
 
        self.visit_expr(ctx, expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 
        let upcast_id = id.upcast();
 
        let indexing_expr = &mut ctx.heap[id];
 

	
 
        let subject_expr_id = indexing_expr.subject;
 
        let index_expr_id = indexing_expr.index;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        indexing_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.visit_expr(ctx, index_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 
        let upcast_id = id.upcast();
 
        let slicing_expr = &mut ctx.heap[id];
 

	
 
        let subject_expr_id = slicing_expr.subject;
 
        let from_expr_id = slicing_expr.from_index;
 
        let to_expr_id = slicing_expr.to_index;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        slicing_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 0);
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 1);
 
        self.visit_expr(ctx, from_expr_id)?;
 
        self.expr_parent = ExpressionParent::Expression(upcast_id, 2);
 
        self.visit_expr(ctx, to_expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let select_expr = &mut ctx.heap[id];
 
        let expr_id = select_expr.subject;
 

	
 
        let old_expr_parent = self.expr_parent;
 
        select_expr.parent = old_expr_parent;
 

	
 
        self.expr_parent = ExpressionParent::Expression(id.upcast(), 0);
 
        self.visit_expr(ctx, expr_id)?;
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_array_expr(&mut self, ctx: &mut Ctx, id: ArrayExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let upcast_id = id.upcast();
 
        let array_expr = &mut ctx.heap[id];
 

	
 
        let old_num_exprs = self.expression_buffer.len();
 
        self.expression_buffer.extend(&array_expr.elements);
 
        let new_num_exprs = self.expression_buffer.len();
 

	
 
        let old_expr_parent = self.expr_parent;
 
        array_expr.parent = old_expr_parent;
 

	
 
        for field_expr_idx in old_num_exprs..new_num_exprs {
 
            let field_expr_id = self.expression_buffer[field_expr_idx];
 
            self.expr_parent = ExpressionParent::Expression(upcast_id, field_expr_idx as u32);
 
            self.visit_expr(ctx, field_expr_id)?;
 
        }
 

	
 
        self.expression_buffer.truncate(old_num_exprs);
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_constant_expr(&mut self, ctx: &mut Ctx, id: ConstantExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let constant_expr = &mut ctx.heap[id];
 
        constant_expr.parent = self.expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let call_expr = &mut ctx.heap[id];
 
        let num_expr_args = call_expr.arguments.len();
 

	
 
        // Resolve the method to the appropriate definition and check the
 
        // legality of the particular method call.
 
        // TODO: @cleanup Unify in some kind of signature call, see similar
 
        //  cleanup comments with this `match` format.
 
        let num_definition_args;
 
        match &mut call_expr.method {
 
            Method::Create => {
 
                num_definition_args = 1;
 
            },
 
            Method::Fires => {
 
                if !self.def_type.is_primitive() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'fires' may only occur in primitive component definitions"
 
                    ));
 
                }
 
                if self.in_sync.is_none() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'fires' may only occur inside synchronous blocks"
 
                    ));
 
                }
 
                num_definition_args = 1;
 
            },
 
            Method::Get => {
 
                if !self.def_type.is_primitive() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'get' may only occur in primitive component definitions"
 
                    ));
 
                }
 
                if self.in_sync.is_none() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'get' may only occur inside synchronous blocks"
 
                    ));
 
                }
 
                num_definition_args = 1;
 
            },
 
            Method::Put => {
 
                if !self.def_type.is_primitive() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'put' may only occur in primitive component definitions"
 
                    ));
 
                }
 
                if self.in_sync.is_none() {
 
                    return Err(ParseError2::new_error(
 
                        &ctx.module.source, call_expr.position,
 
                        "A call to 'put' may only occur inside synchronous blocks"
 
                    ));
 
                }
 
                num_definition_args = 2;
 
            }
 
            Method::Symbolic(symbolic) => {
 
                // Find symbolic method
 
                let (verb, expected_type) = if let ExpressionParent::New(_) = self.expr_parent {
 
                    // Expect to find a component
 
                    ("instantiated", TypeClass::Component)
 
                } else {
 
                    // Expect to find a function
 
                    ("called", TypeClass::Function)
 
                };
 

	
 
                let found_symbol = self.find_symbol_of_type(
 
                    ctx.module.root_id, &ctx.symbols, &ctx.types,
 
                    &symbolic.identifier, expected_type
 
                );
 
                let definition_id = match found_symbol {
 
                    FindOfTypeResult::Found(definition_id) => definition_id,
 
                    FindOfTypeResult::TypeMismatch(got_type_class) => {
 
                        return Err(ParseError2::new_error(
 
                            &ctx.module.source, symbolic.identifier.position,
 
                            &format!("Only functions can be called, this identifier points to a {}", got_type_class)
 
                            &format!(
 
                                "Only {}s can be {}, this identifier points to a {}",
 
                                expected_type, verb, got_type_class
 
                            )
 
                        ))
 
                    },
 
                    FindOfTypeResult::NotFound => {
 
                        return Err(ParseError2::new_error(
 
                            &ctx.module.source, symbolic.identifier.position,
 
                            &format!("Could not find a function with this name")
 
                            &format!("Could not find a {} with this name", expected_type)
 
                        ))
 
                    }
 
                };
 

	
 
                symbolic.definition = Some(definition_id);
 
                match &ctx.types.get_base_definition(&definition_id).unwrap().definition {
 
                    DefinedTypeVariant::Function(definition) => {
 
                        num_definition_args = definition.arguments.len();
 
                    },
 
                    DefinedTypeVariant::Component(definition) => {
 
                        num_definition_args = definition.arguments.len();
 
                    }
 
                    _ => unreachable!(),
 
                }
 
            }
 
        }
 

	
 
        // Check the poly args and the number of variables in the call
 
        // expression
 
        self.visit_call_poly_args(ctx, id)?;
 
        let call_expr = &mut ctx.heap[id];
 
        if num_expr_args != num_definition_args {
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, call_expr.position,
 
                &format!(
 
                    "This call expects {} arguments, but {} were provided",
 
                    num_definition_args, num_expr_args
 
                )
 
            ));
 
        }
 

	
 
        // Recurse into all of the arguments and set the expression's parent
 
        let upcast_id = id.upcast();
 

	
 
        let old_num_exprs = self.expression_buffer.len();
 
        self.expression_buffer.extend(&call_expr.arguments);
 
        let new_num_exprs = self.expression_buffer.len();
 

	
 
        let old_expr_parent = self.expr_parent;
 
        call_expr.parent = old_expr_parent;
 

	
 
        for arg_expr_idx in old_num_exprs..new_num_exprs {
 
            let arg_expr_id = self.expression_buffer[arg_expr_idx];
 
            self.expr_parent = ExpressionParent::Expression(upcast_id, arg_expr_idx as u32);
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.expression_buffer.truncate(old_num_exprs);
 
        self.expr_parent = old_expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let var_expr = &ctx.heap[id];
 
        let variable_id = self.find_variable(ctx, self.relative_pos_in_block, &var_expr.identifier)?;
 
        let var_expr = &mut ctx.heap[id];
 
        var_expr.declaration = Some(variable_id);
 
        var_expr.parent = self.expr_parent;
 

	
 
        Ok(())
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // ParserType visitors
 
    //--------------------------------------------------------------------------
 

	
 
    fn visit_parser_type(&mut self, ctx: &mut Ctx, id: ParserTypeId) -> VisitorResult {
 
        let old_num_types = self.parser_type_buffer.len();
 
        match self.visit_parser_type_without_buffer_cleanup(ctx, id) {
 
            Ok(_) => {
 
                debug_assert_eq!(self.parser_type_buffer.len(), old_num_types);
 
                Ok(())
 
            },
 
            Err(err) => {
 
                self.parser_type_buffer.truncate(old_num_types);
 
                Err(err)
 
            }
 
        }
 
    }
 
}
 

	
 
enum FindOfTypeResult {
 
    // Identifier was exactly matched, type matched as well
 
    Found(DefinitionId),
 
    // Identifier was matched, but the type differs from the expected one
 
    TypeMismatch(&'static str),
 
    // Identifier could not be found
 
    NotFound,
 
}
 

	
 
impl ValidityAndLinkerVisitor {
 
    //--------------------------------------------------------------------------
 
    // Special traversal
 
    //--------------------------------------------------------------------------
 

	
 
    /// Will visit a statement with a hint about its wrapping statement. This is
 
    /// used to distinguish block statements with a wrapping synchronous
 
    /// statement from normal block statements.
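    ///
    /// For example, the block forming the body of a synchronous statement is
    /// visited with `hint == Some(sync_id)` and registers itself as
    /// `Scope::Synchronous((sync_id, block_id))`, while any other block
    /// becomes `Scope::Regular(block_id)`.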
 
    fn visit_stmt_with_hint(&mut self, ctx: &mut Ctx, id: StatementId, hint: Option<SynchronousStatementId>) -> VisitorResult {
 
        if let Statement::Block(block_stmt) = &ctx.heap[id] {
 
            let block_id = block_stmt.this;
 
            self.visit_block_stmt_with_hint(ctx, block_id, hint)
 
        } else {
 
            self.visit_stmt(ctx, id)
 
        }
 
    }
 

	
 
    fn visit_block_stmt_with_hint(&mut self, ctx: &mut Ctx, id: BlockStatementId, hint: Option<SynchronousStatementId>) -> VisitorResult {
 
        if self.performing_breadth_pass {
 
            // Performing a breadth pass, so don't traverse into the statements
 
            // of the block.
 
            return Ok(())
 
        }
 

	
 
        // Set parent scope and relative position in the parent scope. Remember
 
        // these values to set them back to the old values when we're done with
 
        // the traversal of the block's statements.
 
        let body = &mut ctx.heap[id];
 
        body.parent_scope = self.cur_scope.clone();
 
        body.relative_pos_in_parent = self.relative_pos_in_block;
 

	
 
        let old_scope = self.cur_scope.replace(match hint {
 
            Some(sync_id) => Scope::Synchronous((sync_id, id)),
 
            None => Scope::Regular(id),
 
        });
 
        let old_relative_pos = self.relative_pos_in_block;
 

	
 
        // Copy statement IDs into buffer
 
        let old_num_stmts = self.statement_buffer.len();
 
        {
 
            let body = &ctx.heap[id];
 
            self.statement_buffer.extend_from_slice(&body.statements);
 
        }
 
        let new_num_stmts = self.statement_buffer.len();
 

	
 
        // Perform the breadth-first pass. Its main purpose is to find labeled
 
        // statements such that we can find the `goto`-targets immediately when
 
        // performing the depth pass
 
        self.performing_breadth_pass = true;
 
        for stmt_idx in old_num_stmts..new_num_stmts {
 
            self.relative_pos_in_block = (stmt_idx - old_num_stmts) as u32;
 
            self.visit_stmt(ctx, self.statement_buffer[stmt_idx])?;
 
        }
 

	
 
        if !self.insert_buffer.is_empty() {
 
            let body = &mut ctx.heap[id];
 
            for (insert_idx, (pos, stmt)) in self.insert_buffer.drain(..).enumerate() {
 
                body.statements.insert(pos as usize + insert_idx, stmt);
 
            }
 
        }
 

	
 
        // And now the depth pass. Because we iterate over the statement buffer
        // we never visit any of the just-inserted nodes, so we may safely keep
        // using the relative_pos_in_block counter.
 
        self.performing_breadth_pass = false;
 
        for stmt_idx in old_num_stmts..new_num_stmts {
 
            self.relative_pos_in_block = (stmt_idx - old_num_stmts) as u32;
 
            self.visit_stmt(ctx, self.statement_buffer[stmt_idx])?;
 
        }
 

	
 
        self.cur_scope = old_scope;
 
        self.relative_pos_in_block = old_relative_pos;
 

	
 
        // Pop statement buffer
 
        debug_assert!(self.insert_buffer.is_empty(), "insert buffer not empty after depth pass");
 
        self.statement_buffer.truncate(old_num_stmts);
 

	
 
        Ok(())
 
    }
 

	
 
    /// Visits a particular ParserType in the AST and resolves temporary and
 
    /// implicitly inferred types into the appropriate tree. Note that a
 
    /// ParserType node is a tree. Only call this function on the root node of
 
    /// that tree to prevent doing work more than once.
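    ///
    /// For example (illustrative): a symbolic type that names a polymorphic
    /// definition but specifies no polymorphic arguments gets one `Inferred`
    /// argument allocated per polymorphic variable of that definition, to be
    /// filled in later during type inference.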
 
    fn visit_parser_type_without_buffer_cleanup(&mut self, ctx: &mut Ctx, id: ParserTypeId) -> VisitorResult {
 
        use ParserTypeVariant as PTV;
 
        debug_assert!(!self.performing_breadth_pass);
 

	
 
        let init_num_types = self.parser_type_buffer.len();
 
        self.parser_type_buffer.push(id);
 

	
 
        'resolve_loop: while self.parser_type_buffer.len() > init_num_types {
 
            let parser_type_id = self.parser_type_buffer.pop().unwrap();
 
            let parser_type = &ctx.heap[parser_type_id];
 

	
 
            let (symbolic_pos, symbolic_variant, num_inferred_to_allocate) = match &parser_type.variant {
 
                PTV::Message | PTV::Bool |
 
                PTV::Byte | PTV::Short | PTV::Int | PTV::Long |
 
                PTV::String |
 
                PTV::IntegerLiteral | PTV::Inferred => {
 
                    // Builtin types or types that do not require recursion
 
                    continue 'resolve_loop;
 
                },
 
                PTV::Array(subtype_id) |
 
                PTV::Input(subtype_id) |
 
                PTV::Output(subtype_id) => {
 
                    // Requires recursing
 
                    self.parser_type_buffer.push(*subtype_id);
 
                    continue 'resolve_loop;
 
                },
 
                PTV::Symbolic(symbolic) => {
 
                    // Retrieve poly_vars from function/component definition to
 
                    // match against.
 
                    let (definition_id, poly_vars) = match self.def_type {
 
                        DefinitionType::None => unreachable!(),
 
                        DefinitionType::Primitive(id) => (id.upcast(), &ctx.heap[id].poly_vars),
 
                        DefinitionType::Composite(id) => (id.upcast(), &ctx.heap[id].poly_vars),
 
                        DefinitionType::Function(id) => (id.upcast(), &ctx.heap[id].poly_vars),
 
                    };
 

	
 
                    let mut symbolic_variant = None;
 
                    for (poly_var_idx, poly_var) in poly_vars.iter().enumerate() {
 
                        if symbolic.identifier.value == poly_var.value {
 
                            // Type refers to a polymorphic variable.
 
                            // TODO: @hkt Maybe allow higher-kinded types?
 
                            if !symbolic.poly_args.is_empty() {
 
                                return Err(ParseError2::new_error(
 
                                    &ctx.module.source, symbolic.identifier.position,
 
                                    "Polymorphic arguments to a polymorphic variable (higher-kinded types) are not allowed (yet)"
 
                                ));
 
                            }
 
                            symbolic_variant = Some(SymbolicParserTypeVariant::PolyArg(definition_id, poly_var_idx));
 
                        }
 
                    }
 

	
 
                    if let Some(symbolic_variant) = symbolic_variant {
 
                        // Identifier points to a symbolic type
 
                        (symbolic.identifier.position, symbolic_variant, 0)
 
                    } else {
 
                        // Must be a user-defined type, otherwise an error
 
                        let found_type = find_type_definition(
 
                            &ctx.symbols, &ctx.types, ctx.module.root_id, &symbolic.identifier
 
                        ).as_parse_error(&ctx.module.source)?;
 
                        symbolic_variant = Some(SymbolicParserTypeVariant::Definition(found_type.ast_definition));
 

	
 
                        // TODO: @function_ptrs: Allow function pointers at some
 
                        //  point in the future
 
                        if found_type.definition.type_class().is_proc_type() {
 
                            return Err(ParseError2::new_error(
 
                                &ctx.module.source, symbolic.identifier.position,
 
                                &format!(
 
                                    "This identifier points to a {} type, expected a datatype",
 
                                    found_type.definition.type_class()
 
                                )
 
                            ));
 
                        }
 

	
 
                        // If the type is polymorphic then we have two cases: if
 
                        // the programmer did not specify the polyargs then we
 
                        // assume we're going to infer all of them. Otherwise we
 
                        // make sure that they match in count.
 
                        if !found_type.poly_args.is_empty() && symbolic.poly_args.is_empty() {
 
                            // All inferred
 
                            (
 
                                symbolic.identifier.position,
 
                                SymbolicParserTypeVariant::Definition(found_type.ast_definition),
 
                                found_type.poly_args.len()
 
                            )
 
                        } else if symbolic.poly_args.len() != found_type.poly_args.len() {
 
                            return Err(ParseError2::new_error(
 
                                &ctx.module.source, symbolic.identifier.position,
 
                                &format!(
 
                                    "Expected {} polymorphic arguments (or none, to infer them), but {} were specified",
 
                                    found_type.poly_args.len(), symbolic.poly_args.len()
 
                                )
 
                            ))
 
                        } else {
 
                            // If here then the type is not polymorphic, or all
 
                            // types are properly specified by the user.
 
                            for specified_poly_arg in &symbolic.poly_args {
 
                                self.parser_type_buffer.push(*specified_poly_arg);
 
                            }
 

	
 
                            (
 
                                symbolic.identifier.position,
 
                                SymbolicParserTypeVariant::Definition(found_type.ast_definition),
 
                                0
 
                            )
 
                        }
 
                    }
 
                }
 
            };
 

	
 
            // If here then type is symbolic, perform a mutable borrow to set
 
            // the target of the symbolic type.
 
            for _ in 0..num_inferred_to_allocate {
 
                // TODO: @hack, not very user friendly to manually allocate
 
                //  `inferred` ParserTypes with the InputPosition of the
 
                //  symbolic type's identifier.
 
                // We reuse the `parser_type_buffer` to temporarily store these
 
                // and we'll take them out later
 
                self.parser_type_buffer.push(ctx.heap.alloc_parser_type(|this| ParserType{
 
                    this,
 
                    pos: symbolic_pos,
 
                    variant: ParserTypeVariant::Inferred,
 
                }));
 
            }
 

	
 
            if let PTV::Symbolic(symbolic) = &mut ctx.heap[parser_type_id].variant {
 
                for _ in 0..num_inferred_to_allocate {
 
                    symbolic.poly_args.push(self.parser_type_buffer.pop().unwrap());
 
                }
 
                symbolic.variant = Some(symbolic_variant);
 
            } else {
 
                unreachable!();
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Utilities
 
    //--------------------------------------------------------------------------
 

	
 
    /// Adds a local variable to the current scope. It will also annotate the
 
    /// `Local` in the AST with its relative position in the block.
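    ///
    /// Summarizing the checks below: the variable is rejected if it collides
    /// with a module-level symbol, with an earlier local in this or an
    /// enclosing block scope, or with a parameter of the surrounding
    /// definition.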
 
    fn checked_local_add(&mut self, ctx: &mut Ctx, relative_pos: u32, id: LocalId) -> Result<(), ParseError2> {
 
        debug_assert!(self.cur_scope.is_some());
 

	
 
        // Make sure we do not conflict with any global symbols
 
        {
 
            let ident = &ctx.heap[id].identifier;
 
            if let Some(symbol) = ctx.symbols.resolve_symbol(ctx.module.root_id, &ident.value) {
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, ident.position, "Local variable declaration conflicts with symbol")
 
                        .with_postfixed_info(&ctx.module.source, symbol.position, "Conflicting symbol is found here")
 
                );
 
            }
 
        }
 

	
 
        let local = &mut ctx.heap[id];
 
        local.relative_pos_in_block = relative_pos;
 

	
 
        // Make sure we do not shadow any variables in any of the scopes. Note
 
        // that variables in parent scopes may be declared later
 
        let local = &ctx.heap[id];
 
        let mut scope = self.cur_scope.as_ref().unwrap();
 
        let mut local_relative_pos = self.relative_pos_in_block;
 

	
 
        loop {
 
            debug_assert!(scope.is_block(), "scope is not a block");
 
            let block = &ctx.heap[scope.to_block()];
 
            for other_local_id in &block.locals {
 
                let other_local = &ctx.heap[*other_local_id];
 
                // Position check in case another variable with the same name
 
                // is defined in a higher-level scope, but later than the scope
 
                // in which the current variable resides.
 
                if local.this != *other_local_id &&
 
                    local_relative_pos >= other_local.relative_pos_in_block &&
 
                    local.identifier.value == other_local.identifier.value {
 
                    // Collision within this scope
 
                    return Err(
 
                        ParseError2::new_error(&ctx.module.source, local.position, "Local variable name conflicts with another variable")
 
                            .with_postfixed_info(&ctx.module.source, other_local.position, "Previous variable is found here")
 
                    );
 
                }
 
            }
 

	
 
            // Current scope is fine, move to parent scope if any
 
            debug_assert!(block.parent_scope.is_some(), "block scope does not have a parent");
 
            scope = block.parent_scope.as_ref().unwrap();
 
            if let Scope::Definition(definition_id) = scope {
 
                // At outer scope, check parameters of function/component
 
                for parameter_id in ctx.heap[*definition_id].parameters() {
 
                    let parameter = &ctx.heap[*parameter_id];
 
                    if local.identifier.value == parameter.identifier.value {
 
                        return Err(
 
                            ParseError2::new_error(&ctx.module.source, local.position, "Local variable name conflicts with parameter")
 
                                .with_postfixed_info(&ctx.module.source, parameter.position, "Parameter definition is found here")
 
                        );
 
                    }
 
                }
 

	
 
                break;
 
            }
 

	
 
            // If here, then we are dealing with a block-like parent block
 
            local_relative_pos = ctx.heap[scope.to_block()].relative_pos_in_parent;
 
        }
 

	
 
        // No collisions at all
 
        let block = &mut ctx.heap[self.cur_scope.as_ref().unwrap().to_block()];
 
        block.locals.push(id);
 

	
 
        Ok(())
 
    }
 

	
 
    /// Finds a variable in the visitor's scope that must appear before the
 
    /// specified relative position within that block.
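    ///
    /// The search walks the block scopes outwards; once the surrounding
    /// definition is reached its parameters are checked as well, and failing
    /// that a "not declared" error is returned.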
 
    fn find_variable(&self, ctx: &Ctx, mut relative_pos: u32, identifier: &NamespacedIdentifier) -> Result<VariableId, ParseError2> {
 
        debug_assert!(self.cur_scope.is_some());
 
        debug_assert!(identifier.num_namespaces > 0);
 

	
 
        // TODO: Update once globals are possible as well
 
        if identifier.num_namespaces > 1 {
 
            todo!("Implement namespaced constant seeking")
 
        }
 

	
 
        // TODO: May still refer to an alias of a global symbol using a single
 
        //  identifier in the namespace.
 
        // No need to use iterator over namespaces if here
 
        let mut scope = self.cur_scope.as_ref().unwrap();
 
        
 
        loop {
 
            debug_assert!(scope.is_block());
 
            let block = &ctx.heap[scope.to_block()];
 
            
 
            for local_id in &block.locals {
 
                let local = &ctx.heap[*local_id];
 
                
 
                if local.relative_pos_in_block < relative_pos && local.identifier.value == identifier.value {
 
                    return Ok(local_id.upcast());
 
                }
 
            }
 

	
 
            debug_assert!(block.parent_scope.is_some());
 
            scope = block.parent_scope.as_ref().unwrap();
 
            if !scope.is_block() {
 
                // Definition scope, need to check arguments to definition
 
                match scope {
 
                    Scope::Definition(definition_id) => {
 
                        let definition = &ctx.heap[*definition_id];
 
                        for parameter_id in definition.parameters() {
 
                            let parameter = &ctx.heap[*parameter_id];
 
                            if parameter.identifier.value == identifier.value {
 
                                return Ok(parameter_id.upcast());
 
                            }
 
                        }
 
                    },
 
                    _ => unreachable!(),
 
                }
 

	
 
                // Variable could not be found
 
                return Err(ParseError2::new_error(
 
                    &ctx.module.source, identifier.position, "This variable is not declared"
 
                ));
 
            } else {
 
                relative_pos = block.relative_pos_in_parent;
 
            }
 
        }
 
    }
 

	
 
    /// Adds a particular label to the current scope. Will return an error if
 
    /// there is another label with the same name visible in the current scope.
 
    fn checked_label_add(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> Result<(), ParseError2> {
 
        debug_assert!(self.cur_scope.is_some());
 

	
 
        // Make sure label is not defined within the current scope or any of the
 
        // parent scope.
 
        let label = &ctx.heap[id];
 
        let mut scope = self.cur_scope.as_ref().unwrap();
 

	
 
        loop {
 
            debug_assert!(scope.is_block(), "scope is not a block");
 
            let block = &ctx.heap[scope.to_block()];
 
            for other_label_id in &block.labels {
 
                let other_label = &ctx.heap[*other_label_id];
 
                if other_label.label.value == label.label.value {
 
                    // Collision
 
                    return Err(
 
                        ParseError2::new_error(&ctx.module.source, label.position, "Label name conflicts with another label")
 
                            .with_postfixed_info(&ctx.module.source, other_label.position, "Other label is found here")
 
                    );
 
                }
 
            }
 

	
 
            debug_assert!(block.parent_scope.is_some(), "block scope does not have a parent");
 
            scope = block.parent_scope.as_ref().unwrap();
 
            if !scope.is_block() {
 
                break;
 
            }
 
        }
 

	
 
        // No collisions
 
        let block = &mut ctx.heap[self.cur_scope.as_ref().unwrap().to_block()];
 
        block.labels.push(id);
 

	
 
        Ok(())
 
    }
 

	
 
    /// Finds a particular labeled statement by its identifier. Once found it
 
    /// will make sure that the target label does not skip over any variable
 
    /// declarations within the scope in which the label was found.
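    ///
    /// For example (illustrative): a goto whose target label sits after a
    /// local variable declaration in the same block is rejected, because
    /// execution would jump past that declaration.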
 
    fn find_label(&self, ctx: &Ctx, identifier: &Identifier) -> Result<LabeledStatementId, ParseError2> {
 
        debug_assert!(self.cur_scope.is_some());
 

	
 
        let mut scope = self.cur_scope.as_ref().unwrap();
 
        loop {
 
            debug_assert!(scope.is_block(), "scope is not a block");
 
            let relative_scope_pos = ctx.heap[scope.to_block()].relative_pos_in_parent;
 

	
 
            let block = &ctx.heap[scope.to_block()];
 
            for label_id in &block.labels {
 
                let label = &ctx.heap[*label_id];
 
                if label.label.value == identifier.value {
 
                    for local_id in &block.locals {
 
                        // TODO: Better to do this in control flow analysis, it
 
                        //  is legal to skip over a variable declaration if it
 
                        //  is not actually being used. I might be missing
 
                        //  something here when laying out the bytecode...
 
                        let local = &ctx.heap[*local_id];
 
                        if local.relative_pos_in_block > relative_scope_pos && local.relative_pos_in_block < label.relative_pos_in_block {
 
                            return Err(
 
                                ParseError2::new_error(&ctx.module.source, identifier.position, "This target label skips over a variable declaration")
 
                                    .with_postfixed_info(&ctx.module.source, label.position, "Because it jumps to this label")
 
                                    .with_postfixed_info(&ctx.module.source, local.position, "Which skips over this variable")
 
                            );
 
                        }
 
                    }
 
                    return Ok(*label_id);
 
                }
 
            }
 

	
 
            debug_assert!(block.parent_scope.is_some(), "block scope does not have a parent");
 
            scope = block.parent_scope.as_ref().unwrap();
 
            if !scope.is_block() {
 
                return Err(ParseError2::new_error(&ctx.module.source, identifier.position, "Could not find this label"));
 
            }
 

	
 
        }
 
    }
 

	
 
    /// Finds a particular symbol in the symbol table which must correspond to
 
    /// a definition of a particular type.
 
    // Note: root_id, symbols and types passed in explicitly to prevent
 
    //  borrowing errors
 
    fn find_symbol_of_type(
 
        &self, root_id: RootId, symbols: &SymbolTable, types: &TypeTable,
 
        identifier: &NamespacedIdentifier, expected_type_class: TypeClass
 
    ) -> FindOfTypeResult {
 
        // Find symbol associated with identifier
 
        let symbol = symbols.resolve_namespaced_symbol(root_id, &identifier);
 
        if symbol.is_none() { return FindOfTypeResult::NotFound; }
 

	
 
        let (symbol, iter) = symbol.unwrap();
 
        if iter.num_remaining() != 0 { return FindOfTypeResult::NotFound; }
 

	
 
        match &symbol.symbol {
 
            Symbol::Definition((_, definition_id)) => {
 
                // Make sure definition is of the expected type
 
                let definition_type = types.get_base_definition(&definition_id);
 
                debug_assert!(definition_type.is_some(), "Found symbol '{}' in symbol table, but not in type table", String::from_utf8_lossy(&identifier.value));
 
                let definition_type_class = definition_type.unwrap().definition.type_class();
 

	
 
                if definition_type_class != expected_type_class {
 
                    FindOfTypeResult::TypeMismatch(definition_type_class.display_name())
 
                } else {
 
                    FindOfTypeResult::Found(*definition_id)
 
                }
 
            },
 
            Symbol::Namespace(_) => FindOfTypeResult::TypeMismatch("namespace"),
 
        }
 
    }
 

	
 
    /// Checks whether the body of the provided while statement is one of the
    /// block scopes enclosing the current scope.
 
    fn has_parent_while_scope(&self, ctx: &Ctx, id: WhileStatementId) -> bool {
 
        debug_assert!(self.cur_scope.is_some());
 
        let mut scope = self.cur_scope.as_ref().unwrap();
 
        let while_stmt = &ctx.heap[id];
 
        loop {
 
            debug_assert!(scope.is_block());
 
            let block = scope.to_block();
 
            if while_stmt.body == block.upcast() {
 
                return true;
 
            }
 

	
 
            let block = &ctx.heap[block];
 
            debug_assert!(block.parent_scope.is_some(), "block scope does not have a parent");
 
            scope = block.parent_scope.as_ref().unwrap();
 
            if !scope.is_block() {
 
                return false;
 
            }
 
        }
 
    }
 

	
 
    /// This function should be called while dealing with break/continue
 
    /// statements. It will try to find the targeted while statement, using the
 
    /// target label if provided. If a valid target is found then the loop's
 
    /// ID will be returned, otherwise a parsing error is constructed.
 
    /// The provided input position should be the position of the break/continue
 
    /// statement.
 
    fn resolve_break_or_continue_target(&self, ctx: &Ctx, position: InputPosition, label: &Option<Identifier>) -> Result<WhileStatementId, ParseError2> {
 
        let target = match label {
 
            Some(label) => {
 
                let target_id = self.find_label(ctx, label)?;
 

	
 
                // Make sure break target is a while statement
 
                let target = &ctx.heap[target_id];
 
                if let Statement::While(target_stmt) = &ctx.heap[target.body] {
 
                    // Even though we have a target while statement, the break might not be
 
                    // present underneath this particular labeled while statement
 
                    if !self.has_parent_while_scope(ctx, target_stmt.this) {
 
                        ParseError2::new_error(&ctx.module.source, label.position, "Break statement is not nested under the target label's while statement")
 
                            .with_postfixed_info(&ctx.module.source, target.position, "The targeted label is found here");
 
                    }
 

	
 
                    target_stmt.this
 
                } else {
 
                    return Err(
 
                        ParseError2::new_error(&ctx.module.source, label.position, "Incorrect break target label, it must target a while loop")
 
                            .with_postfixed_info(&ctx.module.source, target.position, "The targeted label is found here")
 
                    );
 
                }
 
            },
 
            None => {
 
                // Use the enclosing while statement, the break must be
 
                // nested within that while statement
 
                if self.in_while.is_none() {
 
                    return Err(
 
                        ParseError2::new_error(&ctx.module.source, position, "Break statement is not nested under a while loop")
 
                    );
 
                }
 

	
 
                self.in_while.unwrap()
 
            }
 
        };
 

	
 
        // We have a valid target for the break statement. But we need to
 
        // make sure we will not break out of a synchronous block
 
        {
 
            let target_while = &ctx.heap[target];
 
            if target_while.in_sync != self.in_sync {
 
                // Break is nested under while statement, so can only escape a
 
                // sync block if the sync is nested inside the while statement.
 
                debug_assert!(self.in_sync.is_some());
 
                let sync_stmt = &ctx.heap[self.in_sync.unwrap()];
 
                return Err(
 
                    ParseError2::new_error(&ctx.module.source, position, "Break may not escape the surrounding synchronous block")
 
                        .with_postfixed_info(&ctx.module.source, target_while.position, "The break escapes out of this loop")
 
                        .with_postfixed_info(&ctx.module.source, sync_stmt.position, "And would therefore escape this synchronous block")
 
                );
 
            }
 
        }
 

	
 
        Ok(target)
 
    }
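    // Sketch of the synchronous-block rule enforced above (hedged PDL-style
    // pseudocode; `break` placement is assumed, not verified against the
    // grammar): a break may only leave a sync block if that block is nested
    // inside the targeted while loop.
    //
    //     synchronous { while(true) { break; } }   // ok: the sync block is not escaped
    //     while(true) synchronous { break; }       // error: the break escapes the sync block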
 

	
 
    fn visit_call_poly_args(&mut self, ctx: &mut Ctx, call_id: CallExpressionId) -> VisitorResult {
 
        let call_expr = &ctx.heap[call_id];
 

	
 
        // Determine the polyarg signature
 
        let num_expected_poly_args = match &call_expr.method {
 
            Method::Create => {
 
                0
 
            },
 
            Method::Fires => {
 
                1
 
            },
 
            Method::Get => {
 
                1
 
            },
 
            Method::Put => {
 
                1
 
            }
 
            Method::Symbolic(symbolic) => {
 
                let definition = &ctx.heap[symbolic.definition.unwrap()];
 
                match definition {
                    Definition::Function(definition) => definition.poly_vars.len(),
                    Definition::Component(definition) => definition.poly_vars.len(),
                    _ => {
                        debug_assert!(false, "expected function or component definition while visiting call poly args");
                        unreachable!();
                    }
                }
 
            }
 
        };
 

	
 
        // We allow zero poly args to imply that all of them are inferred.
        // Otherwise the number of specified poly args must match the number
        // expected by the definition.
 
        if call_expr.poly_args.is_empty() {
 
            if num_expected_poly_args != 0 {
 
                // Infer all polyargs
 
                // TODO: @cleanup Not nice to use method position as implicitly
 
                //  inferred parser type pos.
 
                let pos = call_expr.position();
 
                for _ in 0..num_expected_poly_args {
 
                    self.parser_type_buffer.push(ctx.heap.alloc_parser_type(|this| ParserType {
 
                        this,
 
                        pos,
 
                        variant: ParserTypeVariant::Inferred,
 
                    }));
 
                }
 

	
 
                let call_expr = &mut ctx.heap[call_id];
 
                call_expr.poly_args.reserve(num_expected_poly_args);
 
                for _ in 0..num_expected_poly_args {
 
                    call_expr.poly_args.push(self.parser_type_buffer.pop().unwrap());
 
                }
 
            }
 
            Ok(())
 
        } else if call_expr.poly_args.len() == num_expected_poly_args {
 
            // Number of args is not 0, so parse all the specified ParserTypes
 
            let old_num_types = self.parser_type_buffer.len();
 
            self.parser_type_buffer.extend(&call_expr.poly_args);
 
            while self.parser_type_buffer.len() > old_num_types {
 
                let parser_type_id = self.parser_type_buffer.pop().unwrap();
 
                self.visit_parser_type(ctx, parser_type_id)?;
 
            }
 
            self.parser_type_buffer.truncate(old_num_types);
 
            Ok(())
 
        } else {
 
            return Err(ParseError2::new_error(
 
                &ctx.module.source, call_expr.position,
 
                &format!(
 
                    "Expected {} polymorphic arguments (or none, to infer them), but {} were specified",
 
                    num_expected_poly_args, call_expr.poly_args.len()
 
                )
 
            ));
 
        }
 
    }
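    // A minimal illustration (not part of this changeset) of the count rule
    // enforced by `visit_call_poly_args`: either no polymorphic arguments are
    // written, in which case all of them are inferred, or exactly the expected
    // number must be written.
    //
    //     fn poly_arg_count_is_valid(num_expected: usize, num_specified: usize) -> bool {
    //         num_specified == 0 || num_specified == num_expected
    //     }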
 
}
 
\ No newline at end of file
src/runtime/tests.rs
Show inline comments
 
use crate as reowolf;
 
use crossbeam_utils::thread::scope;
 
use reowolf::{
 
    error::*,
 
    EndpointPolarity::{Active, Passive},
 
    Polarity::{Getter, Putter},
 
    *,
 
};
 
use std::{fs::File, net::SocketAddr, path::Path, sync::Arc, time::Duration};
 
//////////////////////////////////////////
 
const MS100: Option<Duration> = Some(Duration::from_millis(100));
 
const MS300: Option<Duration> = Some(Duration::from_millis(300));
 
const SEC1: Option<Duration> = Some(Duration::from_secs(1));
 
const SEC5: Option<Duration> = Some(Duration::from_secs(5));
 
const SEC15: Option<Duration> = Some(Duration::from_secs(15));
 
fn next_test_addr() -> SocketAddr {
 
    use std::{
 
        net::{Ipv4Addr, SocketAddrV4},
 
        sync::atomic::{AtomicU16, Ordering::SeqCst},
 
    };
 
    static TEST_PORT: AtomicU16 = AtomicU16::new(5_000);
 
    let port = TEST_PORT.fetch_add(1, SeqCst);
 
    SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into()
 
}
 
fn file_logged_connector(connector_id: ConnectorId, dir_path: &Path) -> Connector {
 
    file_logged_configured_connector(connector_id, dir_path, MINIMAL_PROTO.clone())
 
}
 
fn file_logged_configured_connector(
 
    connector_id: ConnectorId,
 
    dir_path: &Path,
 
    pd: Arc<ProtocolDescription>,
 
) -> Connector {
 
    let _ = std::fs::create_dir_all(dir_path).expect("Failed to create log output dir");
 
    let path = dir_path.join(format!("cid_{:?}.txt", connector_id));
 
    let file = File::create(path).expect("Failed to create log output file!");
 
    let file_logger = Box::new(FileLogger::new(connector_id, file));
 
    Connector::new(file_logger, pd, connector_id)
 
}
 
static MINIMAL_PDL: &'static [u8] = b"
 
primitive together(in<msg> ia, in<msg> ib, out<msg> oa, out<msg> ob){
 
  while(true) synchronous {
 
    if(fires(ia)) {
 
      put(oa, get(ia));
 
      put(ob, get(ib));
 
    }
 
  } 
 
}
 
";
 
lazy_static::lazy_static! {
 
    static ref MINIMAL_PROTO: Arc<ProtocolDescription> = {
 
        Arc::new(reowolf::ProtocolDescription::parse(MINIMAL_PDL).unwrap())
 
    };
 
}
 
static TEST_MSG_BYTES: &'static [u8] = b"hello";
 
lazy_static::lazy_static! {
 
    static ref TEST_MSG: Payload = {
 
        Payload::from(TEST_MSG_BYTES)
 
    };
 
}
 
fn new_u8_buffer(cap: usize) -> Vec<u8> {
 
    let mut v = Vec::with_capacity(cap);
 
    // The bytes are uninitialized until the caller writes into the buffer (e.g. via `recv`); only the written prefix is read afterwards
 
    unsafe { v.set_len(cap) }
 
    v
 
}
 
//////////////////////////////////////////
 

	
 
#[test]
 
fn basic_connector() {
 
    Connector::new(Box::new(DummyLogger), MINIMAL_PROTO.clone(), 0);
 
}
 

	
 
#[test]
 
fn basic_logged_connector() {
 
    let test_log_path = Path::new("./logs/basic_logged_connector");
 
    file_logged_connector(0, test_log_path);
 
}
 

	
 
#[test]
 
fn new_port_pair() {
 
    let test_log_path = Path::new("./logs/new_port_pair");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, _] = c.new_port_pair();
 
    let [_, _] = c.new_port_pair();
 
}
 

	
 
#[test]
 
fn new_sync() {
 
    let test_log_path = Path::new("./logs/new_sync");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [o, i] = c.new_port_pair();
 
    c.add_component(b"sync", &[i, o]).unwrap();
 
}
 

	
 
#[test]
 
fn new_net_port() {
 
    let test_log_path = Path::new("./logs/new_net_port");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let sock_addrs = [next_test_addr()];
 
    let _ = c.new_net_port(Getter, sock_addrs[0], Passive).unwrap();
 
    let _ = c.new_net_port(Putter, sock_addrs[0], Active).unwrap();
 
}
 

	
 
#[test]
 
fn trivial_connect() {
 
    let test_log_path = Path::new("./logs/trivial_connect");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    c.connect(SEC1).unwrap();
 
}
 

	
 
#[test]
 
fn single_node_connect() {
 
    let test_log_path = Path::new("./logs/single_node_connect");
 
    let sock_addrs = [next_test_addr()];
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let _ = c.new_net_port(Getter, sock_addrs[0], Passive).unwrap();
 
    let _ = c.new_net_port(Putter, sock_addrs[0], Active).unwrap();
 
    c.connect(SEC1).unwrap();
 
}
 

	
 
#[test]
 
fn minimal_net_connect() {
 
    let test_log_path = Path::new("./logs/minimal_net_connect");
 
    let sock_addrs = [next_test_addr()];
 
    scope(|s| {
 
        s.spawn(|_| {
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let _ = c.new_net_port(Getter, sock_addrs[0], Active).unwrap();
 
            c.connect(SEC1).unwrap();
 
        });
 
        s.spawn(|_| {
 
            let mut c = file_logged_connector(1, test_log_path);
 
            let _ = c.new_net_port(Putter, sock_addrs[0], Passive).unwrap();
 
            c.connect(SEC1).unwrap();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn put_no_sync() {
 
    let test_log_path = Path::new("./logs/put_no_sync");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [o, _] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    c.put(o, TEST_MSG.clone()).unwrap();
 
}
 

	
 
#[test]
 
fn wrong_polarity_bad() {
 
    let test_log_path = Path::new("./logs/wrong_polarity_bad");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, i] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    c.put(i, TEST_MSG.clone()).unwrap_err();
 
}
 

	
 
#[test]
 
fn dup_put_bad() {
 
    let test_log_path = Path::new("./logs/dup_put_bad");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [o, _] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    c.put(o, TEST_MSG.clone()).unwrap();
 
    c.put(o, TEST_MSG.clone()).unwrap_err();
 
}
 

	
 
#[test]
 
fn trivial_sync() {
 
    let test_log_path = Path::new("./logs/trivial_sync");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    c.connect(SEC1).unwrap();
 
    c.sync(SEC1).unwrap();
 
}
 

	
 
#[test]
 
fn unconnected_gotten_err() {
 
    let test_log_path = Path::new("./logs/unconnected_gotten_err");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, i] = c.new_port_pair();
 
    assert_eq!(reowolf::error::GottenError::NoPreviousRound, c.gotten(i).unwrap_err());
 
}
 

	
 
#[test]
 
fn connected_gotten_err_no_round() {
 
    let test_log_path = Path::new("./logs/connected_gotten_err_no_round");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, i] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    assert_eq!(reowolf::error::GottenError::NoPreviousRound, c.gotten(i).unwrap_err());
 
}
 

	
 
#[test]
 
fn connected_gotten_err_ungotten() {
 
    let test_log_path = Path::new("./logs/connected_gotten_err_ungotten");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, i] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    c.sync(SEC1).unwrap();
 
    assert_eq!(reowolf::error::GottenError::PortDidntGet, c.gotten(i).unwrap_err());
 
}
 

	
 
#[test]
 
fn native_polarity_checks() {
 
    let test_log_path = Path::new("./logs/native_polarity_checks");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [o, i] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    // fail...
 
    c.get(o).unwrap_err();
 
    c.put(i, TEST_MSG.clone()).unwrap_err();
 
    // succeed..
 
    c.get(i).unwrap();
 
    c.put(o, TEST_MSG.clone()).unwrap();
 
}
 

	
 
#[test]
 
fn native_multiple_gets() {
 
    let test_log_path = Path::new("./logs/native_multiple_gets");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, i] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    c.get(i).unwrap();
 
    c.get(i).unwrap_err();
 
}
 

	
 
#[test]
 
fn next_batch() {
 
    let test_log_path = Path::new("./logs/next_batch");
 
    let mut c = file_logged_connector(0, test_log_path);
 
    c.next_batch().unwrap_err();
 
    c.connect(SEC1).unwrap();
 
@@ -668,785 +668,788 @@ fn multi_recover() {
 
                assert_eq!(res.is_ok(), succeeds);
 
            }
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn udp_self_connect() {
 
    let test_log_path = Path::new("./logs/udp_self_connect");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let mut c = file_logged_connector(0, test_log_path);
 
    c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
    c.new_udp_mediator_component(sock_addrs[1], sock_addrs[0]).unwrap();
 
    c.connect(SEC1).unwrap();
 
}
 

	
 
#[test]
 
fn solo_udp_put_success() {
 
    let test_log_path = Path::new("./logs/solo_udp_put_success");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [p0, _] = c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
    c.connect(SEC1).unwrap();
 
    c.put(p0, TEST_MSG.clone()).unwrap();
 
    c.sync(MS300).unwrap();
 
}
 

	
 
#[test]
 
fn solo_udp_get_fail() {
 
    let test_log_path = Path::new("./logs/solo_udp_get_fail");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [_, p0] = c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
    c.connect(SEC1).unwrap();
 
    c.get(p0).unwrap();
 
    c.sync(MS300).unwrap_err();
 
}
 

	
 
#[ignore]
 
#[test]
 
fn reowolf_to_udp() {
 
    let test_log_path = Path::new("./logs/reowolf_to_udp");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let barrier = std::sync::Barrier::new(2);
 
    scope(|s| {
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // reowolf thread
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let [p0, _] = c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
            c.connect(SEC1).unwrap();
 
            c.put(p0, TEST_MSG.clone()).unwrap();
 
            c.sync(MS300).unwrap();
 
            barrier.wait();
 
        });
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // udp thread
 
            let udp = std::net::UdpSocket::bind(sock_addrs[1]).unwrap();
 
            udp.connect(sock_addrs[0]).unwrap();
 
            let mut buf = new_u8_buffer(256);
 
            let len = udp.recv(&mut buf).unwrap();
 
            assert_eq!(TEST_MSG_BYTES, &buf[0..len]);
 
            barrier.wait();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[ignore]
 
#[test]
 
fn udp_to_reowolf() {
 
    let test_log_path = Path::new("./logs/udp_to_reowolf");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let barrier = std::sync::Barrier::new(2);
 
    scope(|s| {
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // reowolf thread
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let [_, p0] = c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
            c.connect(SEC1).unwrap();
 
            c.get(p0).unwrap();
 
            c.sync(SEC5).unwrap();
 
            assert_eq!(c.gotten(p0).unwrap().as_slice(), TEST_MSG_BYTES);
 
            barrier.wait();
 
        });
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // udp thread
 
            let udp = std::net::UdpSocket::bind(sock_addrs[1]).unwrap();
 
            udp.connect(sock_addrs[0]).unwrap();
 
            for _ in 0..15 {
 
                udp.send(TEST_MSG_BYTES).unwrap();
 
                std::thread::sleep(MS100.unwrap());
 
            }
 
            barrier.wait();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn udp_reowolf_swap() {
 
    let test_log_path = Path::new("./logs/udp_reowolf_swap");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    let barrier = std::sync::Barrier::new(2);
 
    scope(|s| {
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // reowolf thread
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let [p0, p1] = c.new_udp_mediator_component(sock_addrs[0], sock_addrs[1]).unwrap();
 
            c.connect(SEC1).unwrap();
 
            c.put(p0, TEST_MSG.clone()).unwrap();
 
            c.get(p1).unwrap();
 
            c.sync(SEC5).unwrap();
 
            assert_eq!(c.gotten(p1).unwrap().as_slice(), TEST_MSG_BYTES);
 
            barrier.wait();
 
        });
 
        s.spawn(|_| {
 
            barrier.wait();
 
            // udp thread
 
            let udp = std::net::UdpSocket::bind(sock_addrs[1]).unwrap();
 
            udp.connect(sock_addrs[0]).unwrap();
 
            let mut buf = new_u8_buffer(256);
 
            for _ in 0..5 {
 
                std::thread::sleep(Duration::from_millis(60));
 
                udp.send(TEST_MSG_BYTES).unwrap();
 
            }
 
            let len = udp.recv(&mut buf).unwrap();
 
            assert_eq!(TEST_MSG_BYTES, &buf[0..len]);
 
            barrier.wait();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn example_pres_3() {
 
    let test_log_path = Path::new("./logs/example_pres_3");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    scope(|s| {
 
        s.spawn(|_| {
 
            // "amy"
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let p0 = c.new_net_port(Putter, sock_addrs[0], Active).unwrap();
 
            let p1 = c.new_net_port(Putter, sock_addrs[1], Active).unwrap();
 
            c.connect(SEC1).unwrap();
 
            // put {A} and FAIL
 
            c.put(p0, TEST_MSG.clone()).unwrap();
 
            c.sync(SEC1).unwrap_err();
 
            // put {B} and FAIL
 
            c.put(p1, TEST_MSG.clone()).unwrap();
 
            c.sync(SEC1).unwrap_err();
 
            // put {A, B} and SUCCEED
 
            c.put(p0, TEST_MSG.clone()).unwrap();
 
            c.put(p1, TEST_MSG.clone()).unwrap();
 
            c.sync(SEC1).unwrap();
 
        });
 
        s.spawn(|_| {
 
            // "bob"
 
            let mut c = file_logged_connector(1, test_log_path);
 
            let p0 = c.new_net_port(Getter, sock_addrs[0], Passive).unwrap();
 
            let p1 = c.new_net_port(Getter, sock_addrs[1], Passive).unwrap();
 
            c.connect(SEC1).unwrap();
 
            for _ in 0..2 {
 
                // get {A, B} and FAIL
 
                c.get(p0).unwrap();
 
                c.get(p1).unwrap();
 
                c.sync(SEC1).unwrap_err();
 
            }
 
            // get {A, B} and SUCCEED
 
            c.get(p0).unwrap();
 
            c.get(p1).unwrap();
 
            c.sync(SEC1).unwrap();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn ac_not_b() {
 
    let test_log_path = Path::new("./logs/ac_not_b");
 
    let sock_addrs = [next_test_addr(), next_test_addr()];
 
    scope(|s| {
 
        s.spawn(|_| {
 
            // "amy"
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let p0 = c.new_net_port(Putter, sock_addrs[0], Active).unwrap();
 
            let p1 = c.new_net_port(Putter, sock_addrs[1], Active).unwrap();
 
            c.connect(SEC5).unwrap();
 

	
 
            // put both A and B
 
            c.put(p0, TEST_MSG.clone()).unwrap();
 
            c.put(p1, TEST_MSG.clone()).unwrap();
 
            c.sync(SEC1).unwrap_err();
 
        });
 
        s.spawn(|_| {
 
            // "bob"
 
            let pdl = b"
 
            primitive ac_not_b(in<msg> a, in<msg> b, out<msg> c){
 
                // forward A to C but keep B silent
 
                synchronous{ put(c, get(a)); }
 
            }";
 
            let pd = Arc::new(reowolf::ProtocolDescription::parse(pdl).unwrap());
 
            let mut c = file_logged_configured_connector(1, test_log_path, pd);
 
            let p0 = c.new_net_port(Getter, sock_addrs[0], Passive).unwrap();
 
            let p1 = c.new_net_port(Getter, sock_addrs[1], Passive).unwrap();
 
            let [a, b] = c.new_port_pair();
 

	
 
            c.add_component(b"ac_not_b", &[p0, p1, a]).unwrap();
 

	
 
            c.connect(SEC1).unwrap();
 

	
 
            c.get(b).unwrap();
 
            c.sync(SEC1).unwrap_err();
 
        });
 
    })
 
    .unwrap();
 
}
 

	
 
#[test]
 
fn many_rounds_net() {
 
    let test_log_path = Path::new("./logs/many_rounds_net");
 
    let sock_addrs = [next_test_addr()];
 
    const NUM_ROUNDS: usize = 1_000;
 
    scope(|s| {
 
        s.spawn(|_| {
 
            let mut c = file_logged_connector(0, test_log_path);
 
            let p0 = c.new_net_port(Putter, sock_addrs[0], Active).unwrap();
 
            c.connect(SEC1).unwrap();
 
            for _ in 0..NUM_ROUNDS {
 
                c.put(p0, TEST_MSG.clone()).unwrap();
 
                c.sync(SEC1).unwrap();
 
            }
 
        });
 
        s.spawn(|_| {
 
            let mut c = file_logged_connector(1, test_log_path);
 
            let p0 = c.new_net_port(Getter, sock_addrs[0], Passive).unwrap();
 
            c.connect(SEC1).unwrap();
 
            for _ in 0..NUM_ROUNDS {
 
                c.get(p0).unwrap();
 
                c.sync(SEC1).unwrap();
 
            }
 
        });
 
    })
 
    .unwrap();
 
}
 
#[test]
 
fn many_rounds_mem() {
 
    let test_log_path = Path::new("./logs/many_rounds_mem");
 
    const NUM_ROUNDS: usize = 1_000;
 
    let mut c = file_logged_connector(0, test_log_path);
 
    let [p0, p1] = c.new_port_pair();
 
    c.connect(SEC1).unwrap();
 
    for _ in 0..NUM_ROUNDS {
 
        c.put(p0, TEST_MSG.clone()).unwrap();
 
        c.get(p1).unwrap();
 
        c.sync(SEC1).unwrap();
 
    }
 
}
 

	
 
#[test]
 
fn pdl_reo_lossy() {
 
    let pdl = b"
 
    primitive lossy(in<msg> a, out<msg> b) {
 
        while(true) synchronous {
 
            msg m = null;
 
            if(fires(a)) {
 
                m = get(a);
 
                if(fires(b)) {
 
                    put(b, m);
 
                }
 
            }
 
        }
 
    }
 
    ";
 
    reowolf::ProtocolDescription::parse(pdl).unwrap();
 
}
 

	
 
#[test]
 
fn pdl_reo_fifo1() {
 
    let pdl = b"
 
    primitive fifo1(in<msg> a, out<msg> b) {
 
        msg m = null;
 
        while(true) synchronous {
 
            if(m == null) {
 
                if(fires(a)) m=get(a);
 
            } else {
 
                if(fires(b)) put(b, m);
 
                m = null;
 
            }
 
        }
 
    }
 
    ";
 
    reowolf::ProtocolDescription::parse(pdl).unwrap();
 
}
 

	
 
#[test]
 
fn pdl_reo_fifo1full() {
 
    let test_log_path = Path::new("./logs/pdl_reo_fifo1full");
 
    let pdl = b"
 
    primitive fifo1full(in<msg> a, out<msg> b) {
 
        msg m = create(0);
 
        while(true) synchronous {
 
            if(m == null) {
 
                if(fires(a)) m=get(a);
 
            } else {
 
                if(fires(b)) put(b, m);
 
                m = null;
 
            }
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 
    let [_p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    c.add_component(b"fifo1full", &[g0, p1]).unwrap();
 
    c.connect(None).unwrap();
 
    c.get(g1).unwrap();
 
    c.sync(None).unwrap();
 
    assert_eq!(0, c.gotten(g1).unwrap().len());
 
}
 

	
 
#[test]
 
fn pdl_msg_consensus() {
 
    let test_log_path = Path::new("./logs/pdl_msg_consensus");
 
    let pdl = b"
 
    primitive msgconsensus(in<msg> a, in<msg> b) {
 
        while(true) synchronous {
 
            msg x = get(a);
 
            msg y = get(b);
 
            assert(x == y);
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    c.add_component(b"msgconsensus", &[g0, g1]).unwrap();
 
    c.connect(None).unwrap();
 
    c.put(p0, Payload::from(b"HELLO" as &[_])).unwrap();
 
    c.put(p1, Payload::from(b"HELLO" as &[_])).unwrap();
 
    c.sync(SEC1).unwrap();
 

	
 
    c.put(p0, Payload::from(b"HEY" as &[_])).unwrap();
 
    c.put(p1, Payload::from(b"HELLO" as &[_])).unwrap();
 
    c.sync(SEC1).unwrap_err();
 
}
 

	
 
#[test]
 
fn sequencer3_prim() {
 
    let test_log_path = Path::new("./logs/sequencer3_prim");
 
    let pdl = b"
 
    primitive sequencer3(out<msg> a, out<msg> b, out<msg> c) {
 
        int i = 0;
 
        while(true) synchronous {
 
            out to = a;
 
            if     (i==1) to = b;
 
            else if(i==2) to = c;
 
            if(fires(to)) {
 
                put(to, create(0));
 
                i = (i + 1)%3;
 
            }
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) sequencer3, connected by 3 ports.
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    let [p2, g2] = c.new_port_pair();
 
    c.add_component(b"sequencer3", &[p0, p1, p2]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    let which_of_three = move |c: &mut Connector| {
 
        // setup three sync batches. sync. return which succeeded
 
        c.get(g0).unwrap();
 
        c.next_batch().unwrap();
 
        c.get(g1).unwrap();
 
        c.next_batch().unwrap();
 
        c.get(g2).unwrap();
 
        c.sync(None).unwrap()
 
    };
 

	
 
    const TEST_ROUNDS: usize = 50;
 
    // check that the batch indices for rounds 0..TEST_ROUNDS are [0, 1, 2, 0, 1, 2, ...]
 
    for expected_batch_idx in (0..=2).cycle().take(TEST_ROUNDS) {
 
        // silent round
 
        assert_eq!(0, c.sync(None).unwrap());
 
        // non silent round
 
        assert_eq!(expected_batch_idx, which_of_three(&mut c));
 
    }
 
}
 

	
 
#[test]
 
fn sequencer3_comp() {
 
    let test_log_path = Path::new("./logs/sequencer3_comp");
 
    let pdl = b"
 
    primitive fifo1_init<T>(T m, in<T> a, out<T> b) {
 
        while(true) synchronous {
 
            if(m != null && fires(b)) {
 
                put(b, m);
 
                m = null;
 
            } else if (m == null && fires(a)) {
 
                m = get(a);
 
            }
 
        }
 
    }
 
    composite fifo1_full<T>(in<T> a, out<T> b) {
 
        new fifo1_init(create(0), a, b);
 
    }
 
    composite fifo1<T>(in<T> a, out<T> b) {
 
        new fifo1_init(null, a, b);
 
    }
 
    composite sequencer3(out<msg> a, out<msg> b, out<msg> c) {
 
        channel d -> e;
 
        channel f -> g;
 
        channel h -> i;
 
        channel j -> k;
 
        channel l -> m;
 
        channel n -> o;
 

	
 
        new fifo1_full(o, d);
 
        new replicator(e, f, a);
 
        new fifo1(g, h);
 
        new replicator(i, j, b);
 
        new fifo1(k, l);
 
        new replicator(m, n, c);
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) sequencer3, connected by 3 ports.
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    let [p2, g2] = c.new_port_pair();
 
    c.add_component(b"sequencer3", &[p0, p1, p2]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    let which_of_three = move |c: &mut Connector| {
 
        // setup three sync batches. sync. return which succeeded
 
        c.get(g0).unwrap();
 
        c.next_batch().unwrap();
 
        c.get(g1).unwrap();
 
        c.next_batch().unwrap();
 
        c.get(g2).unwrap();
 
        c.sync(SEC1).unwrap()
 
    };
 

	
 
    const TEST_ROUNDS: usize = 50;
 
    // check that the batch indices for rounds 0..TEST_ROUNDS are [0, 1, 2, 0, 1, 2, ...]
 
    for expected_batch_idx in (0..=2).cycle().take(TEST_ROUNDS) {
 
        // silent round
 
        assert_eq!(0, c.sync(SEC1).unwrap());
 
        // non silent round
 
        assert_eq!(expected_batch_idx, which_of_three(&mut c));
 
    }
 
}
 

	
 
enum XRouterItem {
 
    Silent,
 
    GetA,
 
    GetB,
 
}
 
// Hardcoded pseudo-random sequence of round behaviors for the native component
 
const XROUTER_ITEMS: &[XRouterItem] = {
 
    use XRouterItem::{GetA as A, GetB as B, Silent as S};
 
    &[
 
        B, A, S, B, A, A, B, S, B, S, A, A, S, B, B, S, B, S, B, B, S, B, B, A, B, B, A, B, A, B,
 
        S, B, S, B, S, A, S, B, A, S, B, A, B, S, B, S, B, S, S, B, B, A, A, A, S, S, S, B, A, A,
 
        A, S, S, B, B, B, A, B, S, S, A, A, B, A, B, B, A, A, A, B, A, B, S, A, B, S, A, A, B, S,
 
    ]
 
};
 

	
 
#[test]
 
fn xrouter_prim() {
 
    let test_log_path = Path::new("./logs/xrouter_prim");
 
    let pdl = b"
 
    primitive xrouter(in<msg> a, out<msg> b, out<msg> c) {
 
        while(true) synchronous {
 
            if(fires(a)) {
 
                if(fires(b)) put(b, get(a));
 
                else         put(c, get(a));
 
            }
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) xrouter, connected by 3 ports.
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    let [p2, g2] = c.new_port_pair();
 
    c.add_component(b"xrouter", &[g0, p1, p2]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    let now = std::time::Instant::now();
 
    for item in XROUTER_ITEMS.iter() {
 
        match item {
 
            XRouterItem::Silent => {}
 
            XRouterItem::GetA => {
 
                c.put(p0, TEST_MSG.clone()).unwrap();
 
                c.get(g1).unwrap();
 
            }
 
            XRouterItem::GetB => {
 
                c.put(p0, TEST_MSG.clone()).unwrap();
 
                c.get(g2).unwrap();
 
            }
 
        }
 
        assert_eq!(0, c.sync(SEC1).unwrap());
 
    }
 
    println!("PRIM {:?}", now.elapsed());
 
}
 
#[test]
 
fn xrouter_comp() {
 
    let test_log_path = Path::new("./logs/xrouter_comp");
 
    let pdl = b"
 
    primitive lossy<T>(in<T> a, out<T> b) {
 
        while(true) synchronous {
 
            if(fires(a)) {
 
                auto m = get(a);
 
                if(fires(b)) put(b, m);
 
            }
 
        }
 
    }
 
    primitive sync_drain<T>(in<T> a, in<T> b) {
 
        while(true) synchronous {
 
            if(fires(a)) {
 
                get(a);
 
                get(b);
 
            }
 
        }
 
    }
 
    composite xrouter(in<msg> a, out<msg> b, out<msg> c) {
 
        channel d -> e;
 
        channel f -> g;
 
        channel h -> i;
 
        channel j -> k;
 
        channel l -> m;
 
        channel n -> o;
 
        channel p -> q;
 
        channel r -> s;
 
        channel t -> u;
 

	
 
        new replicator(a, d, f);
 
        new replicator(g, t, h);
 
        new lossy(e, l);
 
        new lossy(i, j);
 
        new replicator(m, b, p);
 
        new replicator(k, n, c);
 
        new merger(q, o, r);
 
        new sync_drain(u, s);
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) xrouter, connected by 3 ports.
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    let [p2, g2] = c.new_port_pair();
 
    c.add_component(b"xrouter", &[g0, p1, p2]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    let now = std::time::Instant::now();
 
    for item in XROUTER_ITEMS.iter() {
 
        match item {
 
            XRouterItem::Silent => {}
 
            XRouterItem::GetA => {
 
                c.put(p0, TEST_MSG.clone()).unwrap();
 
                c.get(g1).unwrap();
 
            }
 
            XRouterItem::GetB => {
 
                c.put(p0, TEST_MSG.clone()).unwrap();
 
                c.get(g2).unwrap();
 
            }
 
        }
 
        assert_eq!(0, c.sync(SEC1).unwrap());
 
    }
 
    println!("COMP {:?}", now.elapsed());
 
}
 

	
 
#[test]
 
fn count_stream() {
 
    let test_log_path = Path::new("./logs/count_stream");
 
    let pdl = b"
 
    primitive count_stream(out<msg> o) {
 
        msg m = create(1);
 
        m[0] = 0;
 
        while(true) synchronous {
 
            put(o, m);
 
            m[0] += 1;
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) count_stream, connected by 1 port.
 
    let [p0, g0] = c.new_port_pair();
 
    c.add_component(b"count_stream", &[p0]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    for expecting in 0u8..16 {
 
        c.get(g0).unwrap();
 
        c.sync(None).unwrap();
 
        assert_eq!(&[expecting], c.gotten(g0).unwrap().as_slice());
 
    }
 
}
 

	
 
#[test]
 
fn for_msg_byte() {
 
    let test_log_path = Path::new("./logs/for_msg_byte");
 
    let pdl = b"
 
    primitive for_msg_byte(out<msg> o) {
 
        byte i = 0;
 
        int idx = 0;
 
        while(i<8) {
 
            msg m = create(1);
 
            m[idx] = i;
 
            synchronous put(o, m);
 
            i++;
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    // setup a session between (a) native, and (b) for_msg_byte, connected by 1 port.
 
    let [p0, g0] = c.new_port_pair();
 
    c.add_component(b"for_msg_byte", &[p0]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    for expecting in 0u8..8 {
 
        c.get(g0).unwrap();
 
        c.sync(None).unwrap();
 
        assert_eq!(&[expecting], c.gotten(g0).unwrap().as_slice());
 
    }
 
    c.sync(None).unwrap();
 
}
 

	
 
#[test]
 
fn eq_causality() {
 
    let test_log_path = Path::new("./logs/eq_causality");
 
    let pdl = b"
 
    primitive eq(in<msg> a, in<msg> b, out<msg> c) {
 
        msg ma = null;
 
        msg mb = null;
 
        while(true) synchronous {
 
            if(fires(a)) {
 
                // b and c also fire!
 
                // left first!
 
                ma = get(a);
 
                put(c, ma);
 
                mb = get(b);
 
                assert(ma == mb);
 
            }
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    /*
 
    [native]p0-->g0[eq]p1--.
 
                 g1        |
 
                 ^---------`
 
    */
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    c.add_component(b"eq", &[g0, g1, p1]).unwrap();
 

	
 
    /*
 
                  V--------.
 
                 g2        |
 
    [native]p2-->g3[eq]p3--`
 
    */
 
    let [p2, g2] = c.new_port_pair();
 
    let [p3, g3] = c.new_port_pair();
 
    c.add_component(b"eq", &[g3, g2, p3]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    for _ in 0..4 {
 
        // everything is fine with LEFT FIRST
 
        c.put(p0, TEST_MSG.clone()).unwrap();
 
        c.sync(MS100).unwrap();
 

	
 
        // no solution when left is NOT FIRST
 
        c.put(p2, TEST_MSG.clone()).unwrap();
 
        c.sync(MS100).unwrap_err();
 
    }
 
}
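// Why the second `eq` instance above finds no solution (reasoning from the
// port pairing, not stated in the original test): its input `a` (g3) is fed by
// its own output `c` (p3), but `eq` only puts on `c` after it has gotten `a`,
// so the fixed left-first order deadlocks against itself while the native's
// message waits unread on `b`.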
 

	
 
#[test]
 
fn eq_no_causality() {
 
    let test_log_path = Path::new("./logs/eq_no_causality");
 
    let pdl = b"
 
    composite eq(in<msg> a, in<msg> b, out<msg> c) {
 
        channel leftfirsto -> leftfirsti;
 
        new eqinner(a, b, c, leftfirsto, leftfirsti);
 
    }
 
    primitive eqinner(in<msg> a, in<msg> b, out<msg> c, out<msg> leftfirsto, in<msg> leftfirsti) {
 
        msg ma = null;
 
        msg mb = null;
 
        while(true) synchronous {
 
            if(fires(a)) {
 
                // b and c also fire!
 
                if(fires(leftfirsti)) {
 
                    // left first! DO USE DUMMY
 
                    ma = get(a);
 
                    put(c, ma);
 
                    mb = get(b);
 

	
 
                    // using dummy!
 
                    put(leftfirsto, ma);
 
                    get(leftfirsti);
 
                } else {
 
                    // right first! DON'T USE DUMMY
 
                    mb = get(b);
 
                    put(c, mb);
 
                    ma = get(a);
 
                }
 
                assert(ma == mb);
 
            }
 
        }
 
    }
 
    T some_function<T>(int a, int b) {
 
        T something = a;
 
        return something;
 
    }
 
    primitive quick_test(in<int> a, in<int> b) {
 
        // msg ma = null;
 
        auto test1 = 0;
 
        auto test2 = 0;
 
        auto ma = some_function(test1, test2);
 
        while(true) synchronous {
 
            if (fires(a)) {
 
                ma = get(a);
 
            }
 
            if (fires(b)) {
 
                ma = get(b);
 
            }
 
            if (fires(a) && fires(b)) {
 
                ma = get(a) + get(b);
 
            }
 
        }
 
    }
 
    ";
 
    let pd = reowolf::ProtocolDescription::parse(pdl).unwrap();
 
    let mut c = file_logged_configured_connector(0, test_log_path, Arc::new(pd));
 

	
 
    /*
 
    [native]p0-->g0[eq]p1--.
 
                 g1        |
 
                 ^---------`
 
    */
 
    let [p0, g0] = c.new_port_pair();
 
    let [p1, g1] = c.new_port_pair();
 
    c.add_component(b"eq", &[g0, g1, p1]).unwrap();
 

	
 
    /*
 
                  V--------.
 
                 g2        |
 
    [native]p2-->g3[eq]p3--`
 
    */
 
    let [p2, g2] = c.new_port_pair();
 
    let [p3, g3] = c.new_port_pair();
 
    c.add_component(b"eq", &[g3, g2, p3]).unwrap();
 
    c.connect(None).unwrap();
 

	
 
    for _ in 0..32 {
 
        // ok when they send
 
        c.put(p0, TEST_MSG.clone()).unwrap();
 
        c.put(p2, TEST_MSG.clone()).unwrap();
 
        c.sync(SEC1).unwrap();
 
        // ok when they don't
 
        c.sync(SEC1).unwrap();
 
    }
 
}
0 comments (0 inline, 0 general)